code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 2 |
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
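# Usage sketch (not part of the original snippet): a positive integer is "perfect"
# when it equals the sum of its proper divisors, so the function above gives a
# one-line check.
def is_perfect(candidate: int) -> bool:
    return candidate == sum_of_proper_divisors(candidate)

# is_perfect(6) -> True (1 + 2 + 3 == 6); is_perfect(28) -> True; is_perfect(12) -> False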
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 1 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word was previously inserted."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
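# Add-on sketch (not part of the original snippet): the same node structure supports
# prefix queries, which is the usual reason to reach for a trie in the first place.
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    results: list[str] = []

    def collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for key, child in node.nodes.items():
            collect(child, word + key)

    collect(curr, prefix)
    return results

# words_with_prefix(root, "ban") on the test words above returns
# ["banana", "bananas", "bandana", "band"] (order depends on dict iteration).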
| 629 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = "."):
    # `root` defaults to the current directory; the call site below only passes the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return torch.load(io.BytesIO(model_bytes), map_location="cpu")
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    # load the checkpoint here so the caller receives a state dict rather than raw bytes
    return torch.load(io.BytesIO(model_bytes), map_location="cpu")
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # the source read "n_text_state" here, which is a hidden size, not a head count
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
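# Usage sketch (script name and paths are placeholders, not from the original file):
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf
# or programmatically, with a local OpenAI-format checkpoint:
#   convert_openai_whisper_to_tfms("./tiny.pt", "./whisper-tiny-hf")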
| 629 | 1 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
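# Cross-check sketch (assumption: not part of the original solution). For a cuboid
# a x b x c with a <= b <= c, the shortest surface path has length
# sqrt((a + b) ** 2 + c ** 2); brute-forcing small sizes confirms the counting above.
def brute_force_cuboids(max_size: int) -> int:
    from math import isqrt

    count = 0
    for c in range(1, max_size + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                d = (a + b) ** 2 + c**2
                if isqrt(d) ** 2 == d:
                    count += 1
    return count

# brute_force_cuboids(99) == 1975 and brute_force_cuboids(100) == 2060, the values
# quoted in the Project Euler 86 problem statement.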
| 102 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
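# Usage sketch: the defaults reproduce the tiny Swinv2 layout, and the derived
# attributes follow from `depths` and `embed_dim`:
#   config = Swinv2Config()
#   config.num_layers   # 4, one entry per stage in `depths`
#   config.hidden_size  # 768 == 96 * 2 ** (4 - 1), channel dim after the last stage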
| 102 | 1 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    # ds_b -> digitsum(b); c -> the low k digits of a_i
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"{solution() = }")
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
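# Usage sketch: with `_import_structure` above, importing the package stays cheap and
# the torch/TF/Flax submodules only load when one of the exported names is first
# accessed, e.g. (import path assumed from this file's package location):
#   from transformers import RobertaPreLayerNormConfig  # triggers the lazy import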
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many processes you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
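# Non-interactive usage sketch (fixed inputs instead of the input() prompts above):
#   wt = calculate_waitingtime([0, 1, 2], [4, 2, 1], 3)   # -> [3, 0, 1]
#   tat = calculate_turnaroundtime([4, 2, 1], 3, wt)      # -> [7, 2, 2]
# Process 2 preempts process 1 at t=1; the tie at t=2 keeps process 2 running
# because the remaining-time comparison is strict.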
| 95 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute the largest divisor of `seq_length` below `window_size`, for ONNX export."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
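# Usage sketch: the default `attention_types` entry [["global", "local"], 12] expands
# to 24 alternating layer types, which is what the `num_layers` check above enforces:
#   config = GPTNeoConfig()
#   config.attention_layers[:4]    # ["global", "local", "global", "local"]
#   len(config.attention_layers)   # 24 == config.num_layers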
| 386 | 0 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
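# Usage sketch: count the simple paths from the top-left to the bottom-right corner,
# where 0 is a free cell and 1 is blocked. With the centre blocked, the two paths go
# around the ring clockwise and counterclockwise:
#   grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   depth_first_search(grid, 0, 0, set())  # -> 2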
| 551 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
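# Worked examples (an automorphic number is one whose square ends in the number itself):
#   is_automorphic_number(5)   # True,  5 ** 2 == 25
#   is_automorphic_number(76)  # True,  76 ** 2 == 5776
#   is_automorphic_number(7)   # False, 7 ** 2 == 49
#   is_automorphic_number(-1)  # False, negatives are rejected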
| 551 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 566 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
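# Usage sketch (training internals are placeholders, not from this file): the wrapped
# function must take `batch_size` as its first argument; the decorator injects it and
# halves it each time an OOM-style exception is caught.
#   @find_executable_batch_size(starting_batch_size=64)
#   def train(batch_size, model, dataset):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train(model, dataset)  # called WITHOUT batch_size; the decorator supplies it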
| 566 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    # convert an image tensor in [0, 1] to a bit tensor in {-1, 1}
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    # convert a bit tensor in {-1, 1} back to an image tensor in [0, 1]
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
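# Usage sketch (model construction is illustrative, not from this file): the pipeline
# expects a UNet whose input channels match the bit expansion, i.e. 8 bits per image
# channel, so 24 channels for RGB:
#   unet = UNet2DConditionModel(...)  # assumption: in_channels = 24 for 8-bit RGB
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   images = pipe(height=256, width=256, num_inference_steps=50).images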
| 423 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a list of blocks, so serialize it directly
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
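# A standalone sketch (added; names prefixed with "_" are mine, not from the
# original file) of the categorisation step above: failed test paths are
# bucketed into the first matching glob pattern, exactly as the fnmatch loop
# over `docs.keys()` does.
from fnmatch import fnmatch as _fnmatch

_docs = {"*.py": "API Examples", "*.md": "MD Examples"}

def _categorize(file_path: str) -> str:
    for pattern, category in _docs.items():
        if _fnmatch(file_path, pattern):
            return category
    return "Uncategorised"

assert _categorize("src/transformers/models/bert/modeling_bert.py") == "API Examples"
assert _categorize("docs/source/quicktour.md") == "MD Examples"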
| 423 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
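# Quick sanity check (my addition, not part of the original solution): the four
# non-trivial "digit cancelling" fractions of Project Euler 33 are known to be
# 16/64, 19/95, 26/65 and 49/98, and their product reduces to 1/100.
_expected = {"16/64", "19/95", "26/65", "49/98"}
assert set(fraction_list(2)) == _expected
assert solution() == 100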
| 83 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
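# Two hand-checkable probes (added for illustration): the origin never escapes,
# so the loop runs all max_step iterations and the distance is exactly 1.0,
# while a point far outside the set escapes on the very first iteration,
# giving step 0 and hence distance 0.
assert get_distance(0.0, 0.0, max_step=50) == 1.0
assert get_distance(2.0, 2.0, max_step=50) == 0.0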
| 83 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
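# A tiny standalone sketch (illustrative only; the helper name is mine) of the
# `feed_forward_proj` parsing convention used in __init__ above: "gated-gelu"
# splits into the dense activation "gelu" plus a gating flag, while a plain
# "relu" stays ungated.
def _parse_feed_forward_proj(value: str) -> tuple[str, bool]:
    parts = value.split("-")
    return parts[-1], parts[0] == "gated"

assert _parse_feed_forward_proj("gated-gelu") == ("gelu", True)
assert _parse_feed_forward_proj("relu") == ("relu", False)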
| 449 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
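# Numeric spot checks (added): sigmoid(0) is exactly 0.5, and the function
# satisfies the symmetry sigmoid(-x) == 1 - sigmoid(x).
_x = np.array([-1.0, 0.0, 1.0])
assert sigmoid(np.array([0.0]))[0] == 0.5
assert np.allclose(sigmoid(-_x), 1.0 - sigmoid(_x))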
| 449 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 31 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
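# Illustration (my addition, not part of the original module): what the ONNX
# input mapping above resolves to for the two branches of `self.task`, rebuilt
# with a plain helper so the shapes are easy to eyeball.
from collections import OrderedDict as _OD

def _onnx_inputs(task: str):
    axes = {0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice" else {0: "batch", 1: "sequence"}
    return _OD([("input_ids", axes), ("attention_mask", axes), ("token_type_ids", axes)])

assert list(_onnx_inputs("default").keys()) == ["input_ids", "attention_mask", "token_type_ids"]
assert _onnx_inputs("multiple-choice")["input_ids"][2] == "sequence"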
| 417 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 707 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
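# A minimal sketch (toy keys invented for illustration) of the rename machinery
# used above: `create_rename_keys` produces (old, new) pairs and `rename_key`
# pops each old entry out of the state dict and reinserts it under the new name.
def _apply_renames(state_dict: dict, pairs: list[tuple[str, str]]) -> dict:
    for old, new in pairs:
        state_dict[new] = state_dict.pop(old)
    return state_dict

_toy = {"conv_1.block.weight": 1, "classifier.1.bias": 2}
_pairs = [("conv_1.block.weight", "mobilevitv2.conv_stem.weight"), ("classifier.1.bias", "classifier.bias")]
assert _apply_renames(_toy, _pairs) == {"mobilevitv2.conv_stem.weight": 1, "classifier.bias": 2}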
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 300 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 505 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1_024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 505 | 1 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes

    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
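# Worked example (added for illustration; the process set is invented): three
# processes with arrival times [0, 1, 2] and burst times [3, 1, 2]. P2 preempts
# P1 at t=1 because its remaining time (1) is shorter, so the scheduler yields
# waiting times [1, 0, 2] and turnaround times [4, 1, 4].
_wt = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
assert _wt == [1, 0, 2]
assert calculate_turnaroundtime([3, 1, 2], 3, _wt) == [4, 1, 4]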
| 687 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
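# Standalone illustration (shard names invented) of the filename convention the
# `count_samples` regex above assumes: shards end in
# "-<shard_idx>-<num_samples>.tfrecord", so per-shard counts can be parsed and
# summed without reading any records.
import re as _re

_shards = ["gs://bucket/train-0-1024.tfrecord", "gs://bucket/train-1-512.tfrecord"]
_total = sum(int(_re.search(r"-\d+-(\d+)\.tfrecord", s.split("/")[-1]).group(1)) for s in _shards)
assert _total == 1536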
| 687 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 271 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 271 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 193 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'wavlm'
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> None:
"""simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
# parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
# ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
# adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 193 | 1 |
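# --- Editor's usage sketch (added; not part of the original file) ---
# The config derives num_feat_extract_layers from len(conv_dim), and the
# inputs_to_logits_ratio property multiplies the conv strides
# (5 * 2 * 2 * 2 * 2 * 2 * 2 = 320), i.e. one encoder frame per 320 audio samples.
from transformers import WavLMConfig

config = WavLMConfig()
assert config.num_feat_extract_layers == 7
assert config.inputs_to_logits_ratio == 320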
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit           This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities      These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos (key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    '''simple docstring'''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            """Number of removed nested coreferring mentions in the key """
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            """Number of resulting singleton clusters in the key """
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            """files, respectively""" )
    return doc_coref_infos
def evaluate (key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    '''simple docstring'''
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1} )
        logger.info(
            name.ljust(10 ) , f"Recall: {recall * 100:.2f}" , f" Precision: {precision * 100:.2f}" , f" F1: {f1 * 100:.2f}" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}" )
        output_scores.update({"""conll_score""": conll} )
    return output_scores
def check_gold_parse_annotation (key_lines ):
    '''simple docstring'''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#""" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval ( datasets.Metric ):
    def _info (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute (self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        coref_metrics = [
            ("""mentions""", evaluator.mentions),
            ("""muc""", evaluator.muc),
            ("""bcub""", evaluator.b_cubed),
            ("""ceafe""", evaluator.ceafe),
            ("""lea""", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=coref_metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 157 |
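# --- Editor's usage sketch (added; not part of the original file) ---
# The docstring example above in script form: each prediction/reference is a
# list of CoNLL-formatted lines, and conll_score averages the MUC, B-cubed and
# CEAFe F1 values.
import datasets

coval = datasets.load_metric("coval")
words = [
    "bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -",
    "bc/cctv/00/cctv_0005   0   1       you     PRP  (NP*)         -      -    -    Xu_li  *           (ARG1*)     (ARG0*) (116)",
    "bc/cctv/00/cctv_0005   0   2    everyone   NN   (NP*)         -      -    -    Xu_li  *           (ARGM-DIS*) *       (116)",
    "bc/cctv/00/cctv_0005   0   3       for     IN   (PP*          -      -    -    Xu_li  *           (ARG2*      *       -",
    "bc/cctv/00/cctv_0005   0   4     watching  VBG  (S(VP*))))    watch  01   1    Xu_li  *           *)          (V*)    -",
    "bc/cctv/00/cctv_0005   0   5       .       .    *))           -      -    -    Xu_li  *           *           *       -",
]
results = coval.compute(predictions=[words], references=[words])
print(results["conll_score"])  # 100.0 when predictions equal references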
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common (self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward (self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask (self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict (config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp (self ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config (self ):
        self.config_tester.run_common_tests()
    def test_use_cache_forward (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained (self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @slow
    def test_pegasus_xsum_summary (self ):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
        tokenizer = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text , return_tensors="""np""" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 157 | 1 |
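# --- Editor's usage sketch (added; not part of the original test file) ---
# The cache checks above verify that incremental decode (init_cache +
# past_key_values) matches a full decode. End users get that cache for free
# through generate(), as in the slow summarization test; condensed:
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

def pegasus_summarize(texts):
    model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(texts, return_tensors="np", truncation=True, max_length=512, padding=True)
    summary_ids = model.generate(**batch, num_beams=2).sequences
    return tokenizer.batch_decode(summary_ids, skip_special_tokens=True)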
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch ( xlm_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path , map_location='cpu' )
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(config , indent=2 ) + '\n' )
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '\n' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
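# --- Editor's usage note (added; not part of the original script) ---
# Invocation sketch, assuming this file is saved as convert_xlm_checkpoint.py
# (all paths are placeholders):
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path /path/to/mlm_en_2048.pth \
#       --pytorch_dump_folder_path /path/to/output_dir
# The script writes the weights (WEIGHTS_NAME), config (CONFIG_NAME) and vocab
# file into the output folder.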
| 713 | '''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image ( image ):
    '''simple docstring'''
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask ( mask ):
    '''simple docstring'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
        mask = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline ( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 179 | 0 |
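# --- Editor's usage sketch (added; not part of the original file) ---
# A hedged inpainting example for the pipeline above. The checkpoint name and
# file paths are assumptions; the mask is binarized at 0.5 by _preprocess_mask.
import PIL.Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
original = PIL.Image.open("face.png")
mask = PIL.Image.open("mask.png")
result = pipe(
    image=original, mask_image=mask, num_inference_steps=250,
    eta=0.0, jump_length=10, jump_n_sample=10,
).images[0]
result.save("inpainted.png")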
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt ( args ):
    parameter_file = os.path.join(args.tf_model_dir ,"parameters.json" )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith(".pt" ):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float32 )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
lowerCamelCase : Tuple = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
lowerCamelCase : str = 8
lowerCamelCase : Optional[Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCamelCase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : Optional[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith("model/moe" ):
lowerCamelCase : Tuple = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
lowerCamelCase : Tuple = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
lowerCamelCase : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : List[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/softmlp/kernel" ):
lowerCamelCase : List[str] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
lowerCamelCase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : Any = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
lowerCamelCase : Dict = key_name[-9:-7]
for i in range(16 ):
lowerCamelCase : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
lowerCamelCase : int = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase : Optional[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith("model/mlp" ):
lowerCamelCase : Any = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
lowerCamelCase : Optional[int] = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
lowerCamelCase : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : List[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/p1/bias" ):
lowerCamelCase : Optional[int] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
lowerCamelCase : List[str] = vnp.copy() # same because it is one dimensional
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/p2/kernel" ):
lowerCamelCase : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
lowerCamelCase : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : List[str] = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/p2/bias" ):
lowerCamelCase : Tuple = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
lowerCamelCase : List[Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
elif key_name.startswith("model/ln" ):
lowerCamelCase : List[Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowerCamelCase : Optional[Any] = "model.blocks.%d.feed_forward.norm.bias" % player
lowerCamelCase : List[Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase : int = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/g" ):
lowerCamelCase : Optional[Any] = "model.blocks.%d.feed_forward.norm.weight" % player
lowerCamelCase : int = vnp.copy() # same because it is one dimensional
lowerCamelCase : Optional[int] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith("model/att" ):
lowerCamelCase : str = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
lowerCamelCase : List[str] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase : List[Any] = state[:, 0, :, :]
lowerCamelCase : int = state[:, 1, :, :]
lowerCamelCase : Union[str, Any] = state[:, 2, :, :]
lowerCamelCase : Optional[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : List[Any] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : Any = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
lowerCamelCase : Dict = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
lowerCamelCase : Tuple = torch.tensor(__UpperCAmelCase )
lowerCamelCase : Dict = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
lowerCamelCase : int = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/o/kernel" ):
lowerCamelCase : Optional[int] = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
lowerCamelCase : Tuple = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
elif key_name.startswith("model/an" ):
lowerCamelCase : Tuple = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowerCamelCase : List[str] = "model.blocks.%d.self_attn.norm.bias" % player
lowerCamelCase : Optional[int] = vnp.copy() # same because it is one dimensional
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
elif key_name.endswith("/g" ):
lowerCamelCase : List[Any] = "model.blocks.%d.self_attn.norm.weight" % player
lowerCamelCase : List[str] = vnp.copy() # same because it is one dimensional
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
lowerCamelCase : Union[str, Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
lowerCamelCase : List[Any] = "model.%s.weight" % nlayer
lowerCamelCase : Optional[int] = vnp.copy() # same in embedded
lowerCamelCase : Union[str, Any] = torch.tensor(__UpperCAmelCase )
if key_name.startswith("model/wte" ):
lowerCamelCase : Optional[Any] = "lm_head.weight"
lowerCamelCase : Optional[Any] = vnp.copy() # same in embedded
lowerCamelCase : List[Any] = torch.tensor(__UpperCAmelCase )
elif key_name.startswith("model/wob" ):
lowerCamelCase : str = "final_logits_bias"
lowerCamelCase : List[str] = vnp.copy() # same in embedded
lowerCamelCase : Any = state.reshape((1, -1) )
lowerCamelCase : int = torch.tensor(__UpperCAmelCase )
elif key_name == "model/dense/kernel":
lowerCamelCase : Tuple = "model.last_project.weight"
lowerCamelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase : Dict = torch.tensor(__UpperCAmelCase )
elif key_name == "model/dense_1/bias":
lowerCamelCase : str = "model.last_project.bias"
lowerCamelCase : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase : Optional[int] = torch.tensor(__UpperCAmelCase )
    torch.save(new_state ,args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 311 |
'''simple docstring'''
from ....utils import logging
a : Optional[int] = logging.get_logger(__name__)
class MMBTConfig :
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 640 | 0 |
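# --- Editor's usage sketch (added; not part of the original file) ---
# MMBTConfig grafts multimodal fields onto an existing text config by sharing
# its __dict__. A hedged illustration using BertConfig as the base:
from transformers import BertConfig

text_config = BertConfig()
mmbt_config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)
assert mmbt_config.hidden_size == text_config.hidden_size  # inherited via __dict__
assert mmbt_config.num_labels == 2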
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMv3Config ( PretrainedConfig ):
    model_type = '''layoutlmv3'''
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.12''' )
@property
    def inputs ( self ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
    def atol_for_validation ( self ):
return 1E-5
@property
    def default_onnx_opset ( self ):
return 12
    def generate_dummy_inputs ( self , processor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        setattr(processor.image_processor , "apply_ocr" , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
return inputs
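# --- Editor's usage sketch (added; not part of the original file) ---
# How the ONNX config above is typically driven: dummy tensors are produced
# through the processor so the exported graph sees realistic inputs. The
# processor checkpoint is an assumption.
from transformers import LayoutLMv3Processor
from transformers.utils import TensorType

config = LayoutLMv3Config()
onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
print(sorted(dummy_inputs))  # attention_mask, bbox, input_ids, pixel_values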
| 132 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 132 | 1 |
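# --- Editor's usage sketch (added; not part of the original file) ---
# The fast tokenizer above wraps sequences as [CLS] A [SEP] (B [SEP]) and
# assigns token type ids 0/1 per segment. Checkpoint id is an assumption:
from transformers import MobileBertTokenizerFast

tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
pair = tok("first segment", "second segment")
print(pair.input_ids)       # starts with [CLS], segments separated by [SEP]
print(pair.token_type_ids)  # 0s for the first segment, 1s for the second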
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline ( Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters ( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
def _lowercase ( self : int , _a : Optional[int] , _a : str=False , _a : Tuple=False ) -> int:
__lowerCamelCase : Any = load_image(inputs['image'] )
__lowerCamelCase : Optional[int] = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_a , truncation=_a )
__lowerCamelCase : Dict = self.image_processor(images=_a , return_tensors=self.framework )
model_inputs.update(_a )
return model_inputs
def _lowercase ( self : Tuple , _a : Union[str, Any] ) -> Dict:
__lowerCamelCase : str = self.model(**_a )
return model_outputs
def _lowercase ( self : Union[str, Any] , _a : Tuple , _a : Union[str, Any]=5 ) -> Optional[Any]:
if top_k > self.model.config.num_labels:
__lowerCamelCase : str = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase : Optional[int] = model_outputs.logits.sigmoid()[0]
__lowerCamelCase ,__lowerCamelCase : str = probs.topk(_a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__lowerCamelCase : int = scores.tolist()
__lowerCamelCase : int = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_a , _a )]
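

# --- Added usage sketch (illustrative, not part of the original module) ---
# The class above is normally reached through the `pipeline` factory; the ViLT
# checkpoint below is an assumption, any VQA-capable model would do.
if __name__ == "__main__":
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    preds = vqa(
        image="http://images.cocodataset.org/val2017/000000039769.jpg",
        question="How many cats are there?",
        top_k=2,
    )
    # Each prediction is {"score": float, "answer": str}, as built in postprocess().
    print(preds)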
| 459 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

_UpperCamelCase = False
logger = logging.get_logger('transformers-cli/serving')


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.'
        )
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on'
        )
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device',
            type=int,
            default=-1,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)',
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.'
            )
        else:
            logger.info(f'Serving model over {host}:{port}')
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/',
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=['GET'],
                    ),
                    APIRoute(
                        '/tokenize',
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=['POST'],
                    ),
                    APIRoute(
                        '/detokenize',
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=['POST'],
                    ),
                    APIRoute(
                        '/forward',
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=['POST'],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
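

# --- Added usage sketch (illustrative, not part of the original module) ---
# Typical invocation of the command above, with the defaults registered in
# register_subcommand (host=localhost, port=8888); the task name is assumed
# to be one returned by get_supported_tasks():
#
#   transformers-cli serve --task sentiment-analysis
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'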
| 459 | 1 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of the given credit card number."""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn check on the given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether the given credit card number is valid, and why not if invalid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
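

# --- Added worked example (illustrative, not part of the original module) ---
# Luhn arithmetic on "4111111111111111": doubling every second digit from the
# right yields 8 (from the leading 4) plus seven 2s (14), and the eight
# untouched 1s add 8, so the total is 8 + 14 + 8 = 30 and 30 % 10 == 0.
assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")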
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number('4111111111111111')
    validate_credit_card_number('32323')
| 720 |
'''simple docstring'''
class MaxFenwickTree:
    """Fenwick tree storing running maxima, with half-open range queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute this node's segment maximum from the already-updated
                # lower nodes (reconstructed as a range query; the exact
                # expression was garbled in the source).
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
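

# --- Added usage sketch (illustrative, not part of the original module) ---
# Point updates followed by half-open range-maximum queries:
demo = MaxFenwickTree(5)
demo.update(0, 3)
demo.update(2, 7)
demo.update(4, 1)
assert demo.query(0, 5) == 7  # max over indices [0, 5)
assert demo.query(3, 5) == 1  # max over indices [3, 5)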
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 | 0 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find where `function` becomes 0 in [a, b] via the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
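

# --- Added example (illustrative, not part of the original module) ---
# The same routine on a different bracket: f(2) = -1 and f(3) = 16, so
# bisection converges to the real root of x^3 - 2x - 5 near 2.0945.
assert abs(f(bisection(f, 2, 3))) < 1e-5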
| 28 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""")
        )
        rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias"""))
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""")
        )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""")
        )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""")
        )
        rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias"""))
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""")
        )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""")
        )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""")
        )
        rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias"""))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""")

        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the VisionEncoderDecoderModel structure."""
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_024
        encoder_config.intermediate_size = 4_096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1_024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1_024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50_265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving processor to {pytorch_dump_folder_path}""")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
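

# --- Added usage sketch (illustrative; the script filename is assumed) ---
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten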
| 497 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=5_12,
            sample_rate=1_60_00,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=1_00, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=1_00, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
| 251 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer
        # saved in the pre-v4.17.0 format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lowercases letters
        pass
| 251 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path: str, gpt2_config_file: str, pytorch_dump_folder_path: str):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
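

# --- Added usage sketch (illustrative; the script filename and checkpoint
# path are assumed) ---
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/117M/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch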
| 300 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_UpperCamelCase : List[str] =R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(_UpperCamelCase)  # the RAG config docstring assigned above
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''')
        question_encoder_model_type = question_encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''generator''')
        decoder_model_type = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, '''forced_eos_token_id''', None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
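

# --- Added usage sketch (illustrative, not part of the original module) ---
# RagConfig composes a question-encoder config with a generator config:
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#
#   config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
#   )
#   assert config.generator.model_type == "bart"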
| 332 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask-weighted mean pooling over the token embeddings.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
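

# --- Added usage sketch (illustrative; the checkpoint name is an assumption,
# not part of the original module) ---
# forward() mean-pools token embeddings under the attention mask, then
# projects into the image-embedding space:
#
#   from transformers import AutoTokenizer
#
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tok(["en katt", "a cat"], padding=True, return_tensors="pt")
#   text_embs, token_embs = model(batch["input_ids"], batch["attention_mask"])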
| 332 | 1 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer

    pass
| 277 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-Algorithm for the minimum vertex cover problem."""
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
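
    # --- Added check (illustrative, not part of the original module) ---
    # A vertex cover must touch every edge. The greedy routine mutates the
    # adjacency lists it is given, so verify against a fresh copy:
    fresh = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = greedy_min_vertex_cover({k: list(v) for k, v in fresh.items()})
    assert all(u in cover or v in cover for u in fresh for v in fresh[u])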
| 277 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')

prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 635 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 2_5_5
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get the source column for destination column x."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the source row for destination row y."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('''image_data/lena.jpg''', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
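

# --- Added variant sketch (illustrative, not part of the original module) ---
# The same nearest-neighbour mapping, vectorized with numpy fancy indexing
# instead of the per-pixel Python loops in process():
def resize_vectorized(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]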
| 635 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 692 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back,
                # so the deque always stays sorted by distance.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 692 | 1 |
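# A minimal usage sketch for the 0-1 BFS adjacency list above; the graph below
# is illustrative only.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)
assert g.get_shortest_path(0, 3) == 1  # cheapest route, e.g. 0 -> 2 -> 3, costs 1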
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after"
                " special tokens will not be properly handled. We recommend you to read the related pull request"
                " available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 716 |
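# A minimal usage sketch for the tokenizer above (not part of the original
# file). It downloads the t5-small SentencePiece model from the Hub on first
# use; the commented output is the typical SentencePiece split.
tokenizer = T5Tokenizer.from_pretrained("t5-small")
print(tokenizer.tokenize("Hello world"))  # typically ['▁Hello', '▁world']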
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance along the surface of an ellipsoid between two (lat, lon) points,
    in metres, using Lambert's ellipsoidal-distance formula."""
    # Equation parameter: the flattening of Earth, roughly 1/298.257
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat1 - b_lat2) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277 | 0 |
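# Illustrative call of the function above (not part of the original file);
# arguments are (lat, lon) pairs in degrees and the result is in metres.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):.0f} m")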
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""camembert-base""": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 70 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the Transformers root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level for the Transformers root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the Transformers root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the Transformers root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the Transformers root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove a given handler from the Transformers root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs (disabled by default)."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every Transformers logger."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting for Transformers loggers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Identical to `logger.warning()`, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but a warning with the same message is only emitted once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 438 | 0 |
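# How the helpers above are typically used from application code (a sketch,
# not part of the original file; assumes the module ships as
# `transformers.utils.logging`).
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_info()
app_logger = hf_logging.get_logger("transformers")
app_logger.info("visible at INFO level")
app_logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS=1")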
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 717 |
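# Example invocation of the script above (the script filename and runner names
# are illustrative; the token needs `actions:read` permission):
#
#   python get_runner_status.py \
#       --target_runners single-gpu-runner,multi-gpu-runner \
#       --token "$GITHUB_TOKEN"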
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 319 | 0 |
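# A hedged usage sketch for one of the pipelines re-exported above (not part
# of the original file); the model ids are illustrative public checkpoints and
# weights are downloaded on first use.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)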
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 21 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map the checkpoint's ResBlock parameter names onto diffusers' ResnetBlock2D
    # names (norm1/conv1/time_emb_proj/norm2/conv2 are the standard keys).
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection into diffusers' separate to_q/to_k/to_v keys.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = strabool(args.class_cond)
UpperCAmelCase_ : Optional[int] = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
UpperCAmelCase_ : List[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCAmelCase_ : Any = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
UpperCAmelCase_ : List[Any] = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : str = con_pt_to_diffuser(args.unet_path, unet_config)
UpperCAmelCase_ : Optional[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
UpperCAmelCase_ : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
UpperCAmelCase_ : Dict = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCAmelCase_ : Dict = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
UpperCAmelCase_ : Any = CMStochasticIterativeScheduler(**scheduler_config)
UpperCAmelCase_ : int = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 255 | 0 |
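# Example invocation of the conversion script above (paths and the checkpoint
# filename are illustrative; the name must contain "imagenet64", "256", or
# "test" so the config lookup succeeds):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model --class_cond True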
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 38 |
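# An illustrative round-trip through the variable-language feature above (not
# part of the original file): multiple translations for one language are
# flattened, and the output is sorted by language code.
feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}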
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 25 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] | 25 | 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        """simple docstring"""
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
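# Illustrative aside (not part of the original test): math.isclose treats its two
# tolerances differently -- rel_tol scales with the magnitude of the inputs while
# abs_tol is a fixed floor, and the check passes if either bound is satisfied.
# A minimal sketch, using only the standard library:
assert math.isclose(1000.0, 1000.05, rel_tol=1e-4)  # relative bound covers large values
assert math.isclose(0.0, 5e-5, rel_tol=1e-4, abs_tol=1e-4)  # abs_tol is what saves comparisons near zero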
| 709 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        """simple docstring"""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
def a_ ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """simple docstring"""
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """simple docstring"""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
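# Illustrative aside (not part of the original tests): the one-step vs. multistep
# distinction above comes down to what is passed to the pipeline call. A minimal
# sketch, assuming the dummy components defined in the fast tests:
#
#     pipe = ConsistencyModelPipeline(**self.get_dummy_components()).to("cpu")
#     onestep = pipe(batch_size=1, num_inference_steps=1, output_type="np").images
#     multistep = pipe(batch_size=1, num_inference_steps=None, timesteps=[22, 0], output_type="np").images
#
# With timesteps=[22, 0] the stochastic iterative scheduler runs over the given
# steps; with num_inference_steps=1 the consistency model is evaluated once.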
| 487 | 0 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
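# Illustrative aside (not part of the original file): Horner's scheme rewrites
# p(x) = c0 + c1*x + ... + cn*x**n as (((cn*x + c(n-1))*x + ...)*x + c0, so it
# needs only n multiplications instead of recomputing each power x**i.
# A quick equivalence check, assuming the two functions above:
assert abs(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)) < 1e-9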
| 269 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 269 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
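# Illustrative aside (not part of the original tests): because these multistep
# schedulers share a config schema, from_config can round-trip between them,
# which is exactly what test_switch exercises. A minimal sketch, assuming diffusers:
#
#     deis = DEISMultistepScheduler(num_train_timesteps=1000)
#     unipc = UniPCMultistepScheduler.from_config(deis.config)
#     deis_again = DEISMultistepScheduler.from_config(unipc.config)
#
# Swapping the scheduler this way changes only the sampling rule, not the model.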
| 705 | '''simple docstring'''
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """simple docstring"""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
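# Illustrative aside (not part of the original file): the same forward-difference
# table can be driven without stdin. A minimal sketch, assuming equally spaced x
# values and the ucal() helper above; with f(x) = x**2 + 1 the estimate at 1.5
# is exact (3.25) because third differences vanish for a quadratic.
def _interpolate_demo() -> float:
    x = [0, 1, 2, 3]
    fx = [1.0, 2.0, 5.0, 10.0]  # hypothetical samples of f(x) = x**2 + 1
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = fx[i]
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (1.5 - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * table[0][i]) / math.factorial(i)
    return summ  # Newton's forward estimate of f(1.5)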
| 654 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class BlipaProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""")

        processor = BlipaProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """lower newer"""

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """input_ids""", """attention_mask"""])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """input_ids""", """attention_mask"""])
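# Illustrative aside (not part of the original tests): a processor of this shape
# simply routes text to the tokenizer and images to the image processor and merges
# the two dicts, which is why the key set above is the union of their outputs.
# A minimal sketch, assuming the fixtures created in setUp:
#
#     processor = AutoProcessor.from_pretrained(self.tmpdirname)
#     batch = processor(text="a cat", images=self.prepare_image_inputs(), return_tensors="np")
#     # batch["pixel_values"] comes from the image processor; batch["input_ids"]
#     # and batch["attention_mask"] come from the tokenizer.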
| 207 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
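# Illustrative aside (not part of the original file): the config above uses the
# usual GPT-2 style attribute map, so hidden_size transparently aliases n_embd.
# A minimal sketch, assuming transformers is installed:
#
#     config = ImageGPTConfig(n_embd=256, n_layer=4)
#     assert config.hidden_size == 256     # resolved through attribute_map
#     assert config.vocab_size == 512 + 1  # 512 clustered pixel values + SOS token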
| 207 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 706 |
"""simple docstring"""
def lowercase (_snake_case ,_snake_case ,_snake_case ) -> float:
'''simple docstring'''
__UpperCamelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowercase () -> Dict:
'''simple docstring'''
print(sum_of_series(1 ,1 ,10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 228 | 0 |
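# Illustrative aside (not part of the original file): for first term a, common
# difference d and n terms, the closed form is n/2 * (2a + (n - 1) * d), so
# sum_of_series(1, 1, 10) is 10/2 * (2 + 9) = 55.0 -- i.e. 1 + 2 + ... + 10.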
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
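# Illustrative aside (not part of the original script): the chunked-generation
# pattern above is the part worth reusing. A minimal sketch, assuming a seq2seq
# model and tokenizer are already loaded:
#
#     for examples_chunk in chunks(examples, batch_size):
#         batch = tokenizer(examples_chunk, return_tensors="pt", padding="longest", truncation=True)
#         ids = model.generate(**batch)
#         hypotheses.extend(tokenizer.batch_decode(ids, skip_special_tokens=True))
#
# Writing and flushing after every chunk (as the script does) keeps partial
# results on disk if a long evaluation run is interrupted.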
| 677 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCAmelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
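# Illustrative aside (not part of the original script): every sub-model above is
# exported through the same torch.onnx.export call. A minimal sketch of that
# pattern, assuming only torch (the module name and file path are hypothetical):
#
#     class TinyNet(torch.nn.Module):
#         def forward(self, x):
#             return torch.relu(x) * 2
#
#     torch.onnx.export(
#         TinyNet(),
#         torch.randn(1, 4),          # dummy input used for tracing
#         "tiny_net.onnx",
#         input_names=["x"],
#         output_names=["y"],
#         dynamic_axes={"x": {0: "batch"}},  # keep the batch dimension dynamic
#         opset_version=14,
#     )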
| 677 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    @property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model

    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_inference(self):
        '''simple docstring'''
        model_id = '''google/ncsnpp-church-256'''
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='''numpy''', generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
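# Illustrative aside (not part of the original tests): inside the pipeline the
# variance-exploding SDE sampler alternates the scheduler's Langevin correction
# (step_correct) with its predictor step (step_pred); from the caller's side the
# whole loop is just:
#
#     pipe = ScoreSdeVePipeline(unet=unet, scheduler=ScoreSdeVeScheduler())
#     images = pipe(num_inference_steps=10, output_type="numpy").images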
| 496 | '''simple docstring'''
import math


def fx(x: float, a: float) -> float:
    '''simple docstring'''
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    '''simple docstring'''
    return 2 * x


def get_initial_point(a: float) -> float:
    '''simple docstring'''
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    '''simple docstring'''
    if a < 0:
        raise ValueError('''math domain error''')

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
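# Illustrative aside (not part of the original file): each Newton step maps x to
# x - (x**2 - a) / (2x), which converges quadratically to sqrt(a). Quick checks,
# assuming the functions above:
assert abs(square_root_iterative(2) - math.sqrt(2)) < 1e-9
assert abs(square_root_iterative(10000) - 100.0) < 1e-9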
| 496 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
A_ = None
A_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
A_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    '''simple docstring'''

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
    _type: str = field(default="""Image""", init=False, repr=False)
def __call__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """simple docstring"""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''')

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """simple docstring"""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('''::''')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, '''rb''', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type (a struct with "bytes" and "path" fields)."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array by reading the bytes of local paths."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
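# Usage sketch (added commentary, not part of the original module). With the
# public `datasets` API, the encode/decode round trip implemented above looks
# like this; "cat.png" is an illustrative placeholder for a local image file:
#
#   from datasets import Dataset, Features, Image
#
#   ds = Dataset.from_dict({"image": ["cat.png"]}, features=Features({"image": Image()}))
#   pil_image = ds[0]["image"]            # decode_example() -> PIL.Image.Image
#   ds = ds.cast_column("image", Image(decode=False))
#   raw = ds[0]["image"]                  # {"bytes": ..., "path": ...}, no decoding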
| 393 | 0 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
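# Example invocation (a sketch; the script filename and all paths below are
# illustrative placeholders, only the flags come from the parser above):
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted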
| 715 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
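# Usage sketch (added commentary): this module backs the `accelerate test`
# subcommand, so after configuring with `accelerate config` one can run either of
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml
#
# which launches test_utils/scripts/test_script.py via accelerate-launch.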
| 561 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 55 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # `self.vocab` maps multiple surface forms to the same id, so the raw vocabulary gives the true size
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
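# Usage sketch (added commentary, not part of the original file); the checkpoint
# name comes from PRETRAINED_VOCAB_FILES_MAP above, the sentence is illustrative:
#
#   from transformers import GPTNeoXJapaneseTokenizer
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   print(tokenizer.decode(ids))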
| 109 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "　", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "　", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "　", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された　。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
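# Usage sketch (added commentary, not part of the test file); it assumes the
# MeCab backend dependencies (fugashi + ipadic) are installed and that the Hub
# checkpoint used throughout these tests is reachable:
#
#   from transformers import BertJapaneseTokenizer
#
#   tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#   print(tokenizer.tokenize("こんにちは、世界。"))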
| 613 |
import math
def jump_search(arr: list, x: int) -> int:
    """Search for `x` in the sorted list `arr` and return its index, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
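# Note on the algorithm (added commentary, not in the original file): jump search
# requires `arr` to be sorted in ascending order. It probes every ~sqrt(n)-th
# element to locate the block that may contain `x`, then scans that block
# linearly, giving O(sqrt(n)) comparisons overall. For example,
# jump_search([0, 1, 3, 5, 8, 13], 8) probes indices 1, 3 and 5, then scans
# from index 4 and returns 4.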
| 613 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak roberta_prelayernorm's weights to our BERT structure.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
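# Example invocation (a sketch; the script filename and output directory are
# illustrative placeholders, the repo name comes from the help text above):
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted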
| 605 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=A_ , )
| 605 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
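# Worked example of the rounding above (a sketch, not used by the pipeline):
# with scale_factor=8 a side is divided into a height // 64 latent grid,
# rounded up, then multiplied back by 8.
def _downscale_example():
    assert downscale_height_and_width(768, 768) == (96, 96)  # 768 // 64 * 8
    assert downscale_height_and_width(770, 770) == (104, 104)  # rounded up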
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Text-to-image pipeline for Kandinsky 2.2 with ControlNet-style hint conditioning (see the example docstring above)."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offloads all models to CPU via accelerate, reducing memory usage at the cost of speed."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offloads all models to CPU, moving each one to GPU only while its `forward` runs."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
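    # Usage sketch for the two offload modes (assumes a CUDA device; the
    # from_pretrained argument is elided on purpose):
    #   pipe = KandinskyV22ControlnetPipeline.from_pretrained(...)
    #   pipe.enable_model_cpu_offload()       # keeps one sub-model on GPU at a time
    #   pipe.enable_sequential_cpu_offload()  # slower, but even lower memory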
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Returns the device on which the pipeline's models will be executed."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
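# Minimal sketch of the classifier-free guidance arithmetic used in the
# denoising loop above, on plain tensors (illustration only):
def _cfg_example():
    uncond = torch.tensor([0.0, 1.0])
    text = torch.tensor([1.0, 3.0])
    guided = uncond + 4.0 * (text - uncond)  # guidance_scale = 4.0
    assert torch.equal(guided, torch.tensor([4.0, 9.0]))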
| 286 | import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs) -> Wav2Vec2PhonemeCTCTokenizer:
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def _UpperCAmelCase ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
__UpperCAmelCase = tokenizer("m xxx ɪ" , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
__UpperCAmelCase = tokenizer("m aaa ɪ ccc" , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__UpperCAmelCase = tokenizer("maɪ c" , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [3, 200] ) # mai should be <unk> (=3)
def _UpperCAmelCase ( self: int ) -> int:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(__lowerCAmelCase , "h ə l oʊ h aʊ ɑːɹ j uː" )
def _UpperCAmelCase ( self: List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids )
def _UpperCAmelCase ( self: Any ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
__UpperCAmelCase = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__UpperCAmelCase = tokenizer.decode(sample_ids[0] )
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def _UpperCAmelCase ( self: Optional[int] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(__lowerCAmelCase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def _UpperCAmelCase ( self: Union[str, Any] ) -> str:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids )
def _UpperCAmelCase ( self: List[str] ) -> int:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__UpperCAmelCase = tokenizer.decode(sample_ids[0] )
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
__UpperCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowerCAmelCase )
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def _UpperCAmelCase ( self: Any ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
__UpperCAmelCase = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
__UpperCAmelCase = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __lowerCAmelCase )
def _UpperCAmelCase ( self: Tuple ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__lowerCAmelCase )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer(__lowerCAmelCase , phonemizer_lang="en-us" ).input_ids
__UpperCAmelCase = tokenizer(__lowerCAmelCase , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(__lowerCAmelCase , "ɛ l o h aʊ a ʁ j u" )
def _UpperCAmelCase ( self: Tuple ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how Are you"
__UpperCAmelCase = "hello how are you"
__UpperCAmelCase = tokenizer(__lowerCAmelCase ).input_ids
__UpperCAmelCase = tokenizer(__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _UpperCAmelCase ( self: int ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _UpperCAmelCase ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase )
__UpperCAmelCase = [tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase ) for ids in sample_ids]
check_list_tuples_equal(__lowerCAmelCase , __lowerCAmelCase )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def _UpperCAmelCase ( self: Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def _UpperCAmelCase ( self: Dict ) -> int:
'''simple docstring'''
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def _UpperCAmelCase ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: str ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase = tokenizer.vocab_size
__UpperCAmelCase = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__UpperCAmelCase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
__UpperCAmelCase = tokenizer.add_tokens(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.vocab_size
__UpperCAmelCase = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) )
__UpperCAmelCase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__UpperCAmelCase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
__UpperCAmelCase = tokenizer.add_special_tokens(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.vocab_size
__UpperCAmelCase = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) )
__UpperCAmelCase = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _UpperCAmelCase ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _UpperCAmelCase ( self: str ) -> Optional[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: Dict ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
__UpperCAmelCase = tokenizer.convert_tokens_to_string(__lowerCAmelCase )
self.assertIsInstance(output["text"] , __lowerCAmelCase )
| 286 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract interface that every CLI command must implement."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
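# A minimal concrete subclass, sketching how the two hooks are meant to be
# filled in (the command itself is hypothetical, for illustration only):
class _EchoCommand(BaseTransformersCLICommand):
    def __init__(self, message: str):
        self._message = message

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--message", default="hello")

    def run(self):
        print(self._message)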
| 672 |
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher, one character at a time."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher via a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the running times of the two implementations."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
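# Why 155 and 219 work (a short derivation): Atbash maps the k-th letter from
# the start of the alphabet to the k-th letter from the end, so an uppercase
# code point c goes to ord("A") + ord("Z") - c = 65 + 90 - c = 155 - c, and a
# lowercase one to 97 + 122 - c = 219 - c. A quick sanity check:
def _atbash_sanity_check():
    assert atbash("Az") == "Za"
    assert atbash_slow("Az") == "Za"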
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 672 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
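# Two sample mappings traced through the replacement chain above (the
# checkpoint keys are illustrative, not taken from a real YOSO checkpoint):
def _rename_key_examples():
    assert rename_key("model.transformer_0.mha.W_q.weight") == "yoso.encoder.layer.0.attention.self.query.weight"
    assert rename_key("mlm.mlm_class.weight") == "cls.predictions.decoder.weight"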
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 677 | 1 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    """Returns the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Returns the digit sum of the numerator of the `max_n`-th convergent of the continued fraction of e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
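# Sanity check from the problem statement: the numerator of the 10th
# convergent of e's continued fraction is 1457, whose digit sum is
# 1 + 4 + 5 + 7 = 17.
def _solution_sanity_check():
    assert solution(10) == 17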
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
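# Worked trace of Euclid's algorithm for (121, 44):
# 121 = 2 * 44 + 33 -> (44, 33); 44 = 1 * 33 + 11 -> (33, 11); 33 = 3 * 11 -> gcd 11
def _gcd_sanity_check():
    assert greatest_common_divisor(121, 44) == 11
    assert gcd_by_iterative(121, 44) == 11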
if __name__ == "__main__":
main()
| 206 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Returns the token(s) that would take this constraint one step closer to being fulfilled."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Reads in a token and returns whether it creates progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Updates this constraint's state with the newly generated token; returns (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Resets the state of this constraint to its initialization."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Returns the number of remaining steps needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Creates a new instance of this constraint; if `stateful`, the current progress is copied too."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    r"""Constraint enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
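# Minimal sketch of the PhrasalConstraint state machine on made-up token ids:
def _phrasal_constraint_example():
    constraint = PhrasalConstraint([5, 9, 2])
    assert constraint.advance() == 5  # the next token that makes progress
    stepped, completed, reset = constraint.update(5)
    assert stepped and not completed
    constraint.update(9)
    _, completed, _ = constraint.update(2)
    assert completed and constraint.remaining() == 0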
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        """A helper class that builds a trie with the words represented in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether the number of leaves differs from the number of words, i.e. some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
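# Sketch of the trie on two branches that share the prefix [1, 2]
# (token ids are illustrative):
def _disjunctive_trie_example():
    trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
    assert sorted(trie.next_tokens([1, 2])) == [3, 4]
    assert trie.reached_leaf([1, 2, 3])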
class DisjunctiveConstraint(Constraint):
    r"""A special Constraint that is fulfilled by fulfilling just one of several lists of tokens."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
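# Either branch of the trie satisfies the constraint; here we follow [1, 2, 4]
# (token ids are illustrative):
def _disjunctive_constraint_example():
    constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    constraint.update(1)
    constraint.update(2)
    stepped, completed, reset = constraint.update(4)
    assert stepped and completed and not reset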
class ConstraintListState:
    """A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and the sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process, so they are still at their initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
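# A minimal usage sketch of DisjunctiveConstraint (illustrative token ids, not in
# the original file; assumes the `Constraint` base class defined earlier in this file):
#
#     constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#     constraint.advance()   # -> [1]; both candidate phrases start with token 1
#     constraint.update(1)   # -> (True, False, False): stepped, not yet complete
#     constraint.update(4)   # -> (True, True, False): phrase [1, 4] fully matched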
from math import pow


def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
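# Quick check of the functions above (illustrative; 13 = 2**2 + 3**2 is the only
# way to write 13 as a sum of distinct squares, so the count is 1):
#     >>> solve(13, 2)
#     1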
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
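# Usage sketch (illustrative, not part of the original module):
#
#     config = ConditionalDetrConfig()
#     config.model_type            # "conditional_detr"
#     config.hidden_size           # 256 -- aliased to `d_model` via `attribute_map`
#     config.num_attention_heads   # 8   -- aliased to `encoder_attention_heads`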
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCAmelCase :
def __init__( self : Dict , A : Tuple , A : Union[str, Any]=13 , A : Dict=7 , A : Union[str, Any]=True , A : int=True , A : int=False , A : int=True , A : List[Any]=99 , A : List[str]=32 , A : Dict=5 , A : int=4 , A : Union[str, Any]=37 , A : Any="gelu" , A : List[Any]=0.1 , A : Dict=0.1 , A : Optional[Any]=5_12 , A : List[str]=16 , A : Tuple=2 , A : List[Any]=0.02 , A : int=3 , A : Any=4 , A : Tuple=None , ) -> int:
lowercase_ : Optional[int] = parent
lowercase_ : str = batch_size
lowercase_ : Optional[Any] = seq_length
lowercase_ : Optional[int] = is_training
lowercase_ : int = use_input_mask
lowercase_ : List[str] = use_token_type_ids
lowercase_ : Optional[Any] = use_labels
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Optional[Any] = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = num_labels
lowercase_ : Optional[int] = num_choices
lowercase_ : List[str] = scope
def A ( self : Dict ) -> Dict:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : str = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Any = None
if self.use_token_type_ids:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Dict = None
lowercase_ : Union[str, Any] = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Union[str, Any] ) -> Optional[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def A ( self : Optional[Any] , A : Tuple , A : str , A : Union[str, Any] , A : Dict , A : Tuple , A : Any , A : Any ) -> List[str]:
lowercase_ : str = OpenLlamaModel(config=A )
model.to(A )
model.eval()
lowercase_ : Any = model(A , attention_mask=A )
lowercase_ : Union[str, Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Any , A : Dict , A : int , A : Optional[Any] , A : str , A : Union[str, Any] , A : List[Any] , A : Optional[int] , A : List[Any] , A : Tuple , ) -> int:
lowercase_ : int = True
lowercase_ : Tuple = OpenLlamaModel(A )
model.to(A )
model.eval()
lowercase_ : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
lowercase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , )
lowercase_ : Optional[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , A : Union[str, Any] , A : List[str] , A : str , A : Any , A : Optional[Any] , A : Tuple , A : str , A : int , A : Any , ) -> Optional[int]:
lowercase_ : Dict = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
lowercase_ : Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Dict , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] , A : Tuple , A : Union[str, Any] , A : int , A : Tuple , A : List[str] , A : str , ) -> Tuple:
lowercase_ : List[Any] = True
lowercase_ : List[str] = True
lowercase_ : Tuple = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
lowercase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
lowercase_ : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ : str = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
lowercase_ : int = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
lowercase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def A ( self : Optional[int] ) -> Optional[int]:
lowercase_ : Any = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Any = config_and_inputs
lowercase_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _A , _A , _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def A ( self : Dict ) -> List[str]:
lowercase_ : Optional[Any] = OpenLlamaModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=A , hidden_size=37 )
def A ( self : List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def A ( self : int ) -> Optional[Any]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def A ( self : List[Any] ) -> int:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : List[Any] = type
self.model_tester.create_and_check_model(*A )
def A ( self : Dict ) -> str:
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Any = 3
lowercase_ : Any = input_dict['''input_ids''']
lowercase_ : Tuple = input_ids.ne(1 ).to(A )
lowercase_ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Tuple ) -> Optional[int]:
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Union[str, Any] = 3
lowercase_ : Optional[Any] = '''single_label_classification'''
lowercase_ : str = input_dict['''input_ids''']
lowercase_ : Tuple = input_ids.ne(1 ).to(A )
lowercase_ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Optional[int] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : List[str] ) -> Tuple:
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[Any] = 3
lowercase_ : List[Any] = '''multi_label_classification'''
lowercase_ : List[str] = input_dict['''input_ids''']
lowercase_ : str = input_ids.ne(1 ).to(A )
lowercase_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ : List[str] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : List[str] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def A ( self : Union[str, Any] ) -> Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def A ( self : List[Any] , A : List[str] ) -> int:
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Any = ids_tensor([1, 10] , config.vocab_size )
lowercase_ : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : List[Any] = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
lowercase_ : Dict = original_model(A ).last_hidden_state
lowercase_ : Optional[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
lowercase_ : List[str] = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
lowercase_ : Dict = scaled_model(A ).last_hidden_state
lowercase_ : int = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
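# Not in the original file: because of the relative imports above, run this module
# through the test runner from the repository root, e.g.:
#     python -m pytest <path to this file> -q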
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "new-model"
if is_tf_available():
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = NewModelConfig
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 'bert-base-cased'
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Dict = TFAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'bert-base-cased'
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Optional[int] = TFAutoModelForPreTraining.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Dict = TFAutoModelForCausalLM.from_pretrained(_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = TFAutoModelForCausalLM.from_pretrained(_A ,output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(_A )
_lowerCAmelCase, _lowerCAmelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(_A ,output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
_lowerCAmelCase, _lowerCAmelCase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A ,output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
@slow
@require_tensorflow_probability
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : str = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
_lowerCAmelCase, _lowerCAmelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A ,output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
self.assertEqual(model.num_parameters() ,1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) ,1_4410 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
self.assertEqual(model.num_parameters() ,1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) ,1_4410 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Optional[int] = copy.deepcopy(model.config )
_lowerCAmelCase : Union[str, Any] = ['FunnelBaseModel']
_lowerCAmelCase : List[str] = TFAutoModel.from_config(_A )
self.assertIsInstance(_A ,_A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
_lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register('new-model' ,_A )
_lowerCAmelCase : Any = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_A ):
auto_class.register(_A ,_A )
auto_class.register(_A ,_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
auto_class.register(_A ,_A )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase : Optional[Any] = BertModelTester(self ).get_config()
_lowerCAmelCase : Optional[int] = NewModelConfig(**tiny_config.to_dict() )
_lowerCAmelCase : str = auto_class.from_config(_A )
self.assertIsInstance(_A ,_A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
_lowerCAmelCase : Union[str, Any] = auto_class.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained('bert-base' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained(_A ,revision='aaaaaa' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' ,):
_lowerCAmelCase : Any = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_A ,'Use `from_pt=True` to load this model' ):
_lowerCAmelCase : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
_lowerCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
_lowerCAmelCase : List[str] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
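# Not in the original file: the `..bert` relative import means this module must be
# run through the test runner from the repository root, e.g.:
#     python -m pytest <path to this file> -q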
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
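# Standalone sketch of the denoising loop these tests exercise (illustrative,
# not in the original file; the relative import above keeps this test module from
# being run directly, so this is shown as a comment):
#
#     scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         model_output = torch.zeros_like(sample)  # stand-in for a real denoising model
#         sample = scheduler.step(model_output, t, sample).prev_sample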
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = str(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = [n]
for i in range(1 , len(lowerCAmelCase_ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if len(str(lowerCAmelCase_ ) ) > 3:
if not is_prime(int(str(lowerCAmelCase_ )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase_ )[:3] ) ):
return False
return True
def UpperCAmelCase__ (lowerCAmelCase_ = 11 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 13
while len(lowerCAmelCase_ ) != count:
if validate(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = list_truncated_nums(lowerCAmelCase_ )
if all(is_prime(lowerCAmelCase_ ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase_ )
num += 2
return list_truncated_primes
def UpperCAmelCase__ ():
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(1_1)) = }")
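# Known result for Project Euler problem 37 (sanity check, not in the original file):
#     >>> solution()
#     748317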
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ )
# create the counting array
__SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min
__SCREAMING_SNAKE_CASE = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__SCREAMING_SNAKE_CASE = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowerCAmelCase_ ) ):
__SCREAMING_SNAKE_CASE = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip()
a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
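# Counting sort runs in O(n + k) time and O(k) extra space, where k is
# (max - min + 1); the min-offset also makes negative inputs work. A quick
# illustrative check (not in the original file):
#     >>> counting_sort([-5, 3, 0, 3, -2])
#     [-5, -2, 0, 3, 3]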
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
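# Not in the original file: a minimal entry point so the module can be run directly
# from the examples directory (the tests rely on pytest/parameterized fixtures, so
# delegate to pytest's runner):
if __name__ == "__main__":
    pytest.main([__file__, "-q"])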
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
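# For layer 0, the first pairs generated above map, e.g.,
# "module.blocks.0.norm1.weight" -> "vit.encoder.layer.0.layernorm_before.weight"
# (with the leading "vit." stripped again when base_model=True).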
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
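# Standalone sketch of the split performed above (not part of the conversion
# script): timm-style checkpoints store one fused (3*hidden, hidden) qkv
# projection, which is sliced into separate query/key/value matrices. The
# hidden size below is illustrative.
hidden = 8
qkv_weight = torch.randn(3 * hidden, hidden)
query = qkv_weight[:hidden, :]
key = qkv_weight[hidden : 2 * hidden, :]
value = qkv_weight[-hidden:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)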
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
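# Example invocation (the script filename is an assumption; any of the public
# MSN checkpoint URLs can be passed via --checkpoint_url):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small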
| 647 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)
    if not is_valid_tree(root):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
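# Quick usage sketch for the checker above (not part of the original module):
# the first tree is a valid BST, the second one is not (5 is not < 4).
valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(4.0, TreeNode(5.0), TreeNode(6.0))
assert is_binary_search_tree(valid)
assert not is_binary_search_tree(invalid)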
| 27 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None | 219 | 0 |
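# A minimal sketch of the pattern exercised above (checkpoint name and step
# count are illustrative): load the ONNX pipeline on CPU and swap in a
# different scheduler before running it.
def run_with_dpm_scheduler(checkpoint: str, prompt: str):
    pipe = OnnxStableDiffusionPipeline.from_pretrained(checkpoint, provider="CPUExecutionProvider")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    return pipe(prompt, num_inference_steps=2, output_type="np").images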
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
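# A generic sketch of what `_LazyModule` provides: PEP 562 module-level
# `__getattr__` defers the heavy import until the symbol is first accessed.
def __getattr__(name):
    if name == "Wav2Vec2ProcessorWithLM":
        from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM

        return Wav2Vec2ProcessorWithLM
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")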
| 554 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
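# Example invocation via python-fire (script filename is an assumption);
# omitting --save_path overwrites the input checkpoint in place:
#
#   python convert_model_to_fp16.py --src_path pytorch_model.bin --save_path pytorch_model_fp16.bin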
| 554 | 1 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
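# Sanity check (not part of the original file): the sequence of ugly numbers
# starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
assert [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]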
| 395 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
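# Usage sketch (not part of the original module): configs are plain containers,
# so LUKE-specific fields survive a round-trip through `to_dict`.
config = LukeConfig(entity_emb_size=128)
assert config.to_dict()["entity_emb_size"] == 128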
| 164 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a ZSTD file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
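# Usage sketch (file path illustrative): once these protocols are registered
# with fsspec - as `datasets` does at import time - URL chaining decompresses
# transparently:
#
#   import fsspec
#   with fsspec.open("gzip://data.txt::file://./data.txt.gz", "rb") as f:
#       raw = f.read()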
| 610 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
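# Usage sketch (assuming the classes above): the ONNX export config advertises
# a single 4-D `pixel_values` input with dynamic axes.
onnx_config = YolosOnnxConfig(YolosConfig())
assert "pixel_values" in onnx_config.inputs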
| 610 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([F'- {prop}: {val}' for prop, val in info.items()]))

    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F'\t{accelerate_config}'
    )
    print(accelerate_config_str)

    info["Accelerate configs"] = accelerate_config

    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
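# Usage: run `accelerate env` (or this module directly) to print the summary
# assembled above, e.g.
#
#   python -m accelerate.commands.env --config_file path/to/accelerate_config.yaml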
| 510 |
"""simple docstring"""
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # the covering node spans more than this cell; fold the new value in
                # (sufficient when stored values only grow)
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum over the half-open range [left, right)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
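# Usage sketch for the class above (class name as reconstructed here): point
# updates followed by half-open range-maximum queries.
tree = MaxFenwickTree(5)
tree.update(0, 1)
tree.update(3, 7)
assert tree.query(0, 4) == 7
assert tree.query(1, 3) == 0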
| 510 | 1 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
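# The comparison idiom both tests above rely on, in isolation: flatten the two
# pipeline outputs to numpy and bound the maximum absolute difference.
def max_abs_diff(a, b) -> float:
    return float(np.abs(to_np(a) - to_np(b)).max())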
| 384 |
'''simple docstring'''
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "

    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
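# Round-trip sanity check (not part of the original file):
assert decode(encode("hello world")) == "hello world"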
| 384 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''')
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''')
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer('''My favorite food is''', return_tensors='''pt''').to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
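
# Illustrative sketch (not the transformers implementation, and the helper name is ours):
# how the two RoPE scaling strategies exercised by test_model_rope_scaling differ.
# Linear scaling compresses the position indices by the factor for every input; dynamic
# NTK scaling leaves short inputs untouched and only rescales the rotary base once the
# input exceeds the original maximum sequence length.
def _demo_rope_scaling(dim=64, base=10000.0, scaling_type="linear", factor=10.0, seq_len=10, max_pos=512):
    import torch

    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float()
    if scaling_type == "linear":
        positions = positions / factor
    elif scaling_type == "dynamic" and seq_len > max_pos:
        adjusted_base = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))
        inv_freq = 1.0 / (adjusted_base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions, inv_freq)  # angles fed to sin/cos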
| 638 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config based on the backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-50''')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-101''')
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''''''
    if is_panoptic:
        prefix = '''detr.'''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
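
# Illustrative helper (not part of the original script; the name is ours): PyTorch's fused
# in_proj matrix stacks the query, key and value projections along dim 0, which is why
# read_in_q_k_v can slice rows [:256], [256:512] and [-256:] for a hidden size of 256.
def _demo_split_fused_qkv(hidden_size=256):
    fused = torch.randn(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : 2 * hidden_size, :]
    v = fused[-hidden_size:, :]
    # re-concatenating the three blocks reproduces the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)
    return q, k, v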
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(F'Converting model {model_name}...')
    detr = torch.hub.load('''facebookresearch/detr''', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''detr''')
                and not key.startswith('''class_labels_classifier''')
                and not key.startswith('''bbox_predictor''')
            ):
                val = state_dict.pop(key)
                state_dict['''detr.model''' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['''detr.''' + key] = val
            elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('''Uploading PyTorch model and image processor to the hub...''')
        model.push_to_hub(F'nielsr/{model_name}')
        processor.push_to_hub(F'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
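
# Example invocation (hypothetical script name and output path, shown for illustration):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50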
| 638 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader, test_dataloader
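
# New Code #
# Minimal sketch (illustrative only; the helper name is ours) of what StratifiedKFold
# feeds into `get_fold_dataloaders` above: each split yields disjoint train/validation
# index arrays whose class ratios mirror the full label distribution.
def _demo_stratified_split(n_splits=3):
    labels = np.array([0, 0, 0, 1, 1, 1])
    splitter = StratifiedKFold(n_splits=n_splits)
    # each (train_idxs, valid_idxs) pair keeps one '0' and one '1' in the validation fold
    return list(splitter.split(np.zeros(len(labels)), labels))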
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)
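
# New Code #
# Illustrative sketch (not part of the training loop; the helper name is ours): the fold
# averaging above is a plain logit ensemble -- stack per-fold logits, average, then argmax.
def _demo_fold_average():
    fold_logits = [torch.tensor([[2.0, 0.0]]), torch.tensor([[0.0, 1.0]]), torch.tensor([[1.0, 0.0]])]
    averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(len(fold_logits))
    return averaged.argmax(dim=-1)  # tensor([0])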
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 709 | '''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCamelCase : List[Any] = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        # single-label mode softmaxes over the candidates, so the scores should sum to 1.0
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')

        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')

        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?',
                candidate_labels='politics',
                hypothesis_template='Not formatting template',
            )

        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?',
                candidate_labels='politics',
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt',
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt',
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf',
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.817, 0.713, 0.018, 0.018],
} , )
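
# Background sketch (illustrative, not the pipeline internals; the helper name is ours):
# zero-shot classification casts every candidate label as an NLI hypothesis (by default
# 'This example is {}.'), scores each premise/hypothesis pair with the NLI model, and
# normalizes the entailment logits into the 'scores' asserted above.
def _demo_label_scores(entailment_logits):
    import math

    exps = [math.exp(x) for x in entailment_logits]
    total = sum(exps)
    return [e / total for e in exps]  # one probability per candidate label, sums to 1.0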
| 610 | 0 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
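
# Illustrative extension (not in the original module; the name is ours): the same failure
# array also lets KMP report every match position instead of a single boolean.
def kmp_find_all(pattern: str, text: str) -> list[int]:
    failure = get_failure_array(pattern)
    matches = []
    j = 0
    for i, char in enumerate(text):
        while j > 0 and pattern[j] != char:
            j = failure[j - 1]
        if pattern[j] == char:
            j += 1
        if j == len(pattern):
            matches.append(i - len(pattern) + 1)
            j = failure[j - 1]
    return matches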
if __name__ == "__main__":
# Test 1)
A_ : Optional[Any] = "abc1abc12"
A_ : List[str] = "alskfjaldsabc1abc1abc12k23adsfabcabc"
A_ : Union[str, Any] = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
A_ : Dict = "ABABX"
A_ : int = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
A_ : List[Any] = "AAAB"
A_ : List[str] = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
A_ : Optional[int] = "abcdabcy"
A_ : List[str] = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
A_ : Tuple = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = '''segformer'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='''gelu''',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('''reshape_last_stage''', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
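
# Minimal usage sketch (illustrative, assumes the transformers package context above):
#   config = SegformerConfig(num_labels=150)  # e.g. the 150 ADE20k semantic classes
#   assert len(config.depths) == config.num_encoder_blocks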
| 38 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
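
# Minimal composition sketch (illustrative only): a RagConfig is assembled from two
# sub-configs, e.g. via the classmethod above:
#   from transformers import AutoConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#       AutoConfig.from_pretrained("facebook/bart-large"),
#       n_docs=5,
#   )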
| 355 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
def lowercase__ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul, self.conv_stride, 1 )
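# Illustrative aside (not from the original file): the property above multiplies the
# feature-extractor strides, i.e. the overall waveform-to-frame downsampling ratio.
# The stride tuple below is an assumed wav2vec2-style default, for demonstration only.
def _demo_inputs_to_logits_ratio() -> int:
    import functools
    import operator
    conv_stride = (5, 2, 2, 2, 2, 2, 2)  # assumed default strides
    return functools.reduce(operator.mul, conv_stride, 1)  # 5 * 2**6 = 320 samples per frame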
| 355 | 1 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def lowerCamelCase_ ( _lowerCamelCase : float ):
    if _lowerCamelCase <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(_lowerCamelCase,) )[0]
def integrand ( x : float , z : float ):
    return math.pow(x , z - 1 ) * math.exp(-x )
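# Quick sanity check (illustrative, not in the original script): the integral above
# generalises the factorial, so gamma(5) should equal 4! = 24 up to quadrature error.
def _demo_gamma_check() -> None:
    assert abs(lowerCamelCase_(5.0) - 24.0) < 1e-4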
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 142 |
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the conversion factor, relative to the metre
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def lowerCamelCase_ ( value : float , from_type : str , to_type : str ):
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(1_0 , exponent )
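# Illustrative usage (not part of the original module): kilometre -> metre scales by
# 10 ** (3 - 0), and the reverse direction uses the negated exponent.
def _demo_length_conversion() -> None:
    assert lowerCamelCase_(4, "kilometer", "meter") == 4000
    assert lowerCamelCase_(1, "meter", "kilometer") == 0.001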
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 142 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=DummyObject):
'''simple docstring'''
__a : Dict = ["""transformers""", """torch""", """note_seq"""]
def __init__( self , *A , **A ) ->List[str]:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A__ ( cls , *A , **A ) ->Optional[Any]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A__ ( cls , *A , **A ) ->int:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
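# Note (illustrative): the DummyObject metaclass makes this placeholder raise a
# clear ImportError naming the missing backends the moment the class is
# instantiated or its attributes are used, instead of failing at import time.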
| 433 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=24 , A=2 , A=6 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=None , A=10_00 , ) ->Any:
UpperCAmelCase__ :Tuple = parent
UpperCAmelCase__ :List[str] = batch_size
UpperCAmelCase__ :Optional[int] = seq_length
UpperCAmelCase__ :str = is_training
UpperCAmelCase__ :Tuple = use_input_mask
UpperCAmelCase__ :Optional[int] = use_token_type_ids
UpperCAmelCase__ :int = use_labels
UpperCAmelCase__ :Tuple = vocab_size
UpperCAmelCase__ :int = hidden_size
UpperCAmelCase__ :Any = num_hidden_layers
UpperCAmelCase__ :List[Any] = num_attention_heads
UpperCAmelCase__ :Tuple = intermediate_size
UpperCAmelCase__ :List[str] = hidden_act
UpperCAmelCase__ :Any = hidden_dropout_prob
UpperCAmelCase__ :Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ :List[str] = max_position_embeddings
UpperCAmelCase__ :str = type_vocab_size
UpperCAmelCase__ :int = type_sequence_label_size
UpperCAmelCase__ :int = initializer_range
UpperCAmelCase__ :str = num_labels
UpperCAmelCase__ :Tuple = scope
UpperCAmelCase__ :int = range_bbox
def A__ ( self ) ->Union[str, Any]:
UpperCAmelCase__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ :List[Any] = bbox[i, j, 3]
UpperCAmelCase__ :Union[str, Any] = bbox[i, j, 1]
UpperCAmelCase__ :str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ :Any = bbox[i, j, 2]
UpperCAmelCase__ :Dict = bbox[i, j, 0]
UpperCAmelCase__ :Optional[Any] = t
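        # The swaps above enforce the (x0, y0, x1, y1) box convention LiLT expects:
        # each random box is reordered so that x0 <= x1 and y0 <= y1 before use.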
UpperCAmelCase__ :int = None
if self.use_input_mask:
UpperCAmelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase__ :int = None
if self.use_token_type_ids:
UpperCAmelCase__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ :List[str] = None
UpperCAmelCase__ :List[str] = None
if self.use_labels:
UpperCAmelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ :List[str] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A__ ( self ) ->Optional[int]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A__ ( self , A , A , A , A , A , A , A , ) ->Any:
UpperCAmelCase__ :Any = LiltModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ :Tuple = model(A , bbox=A , attention_mask=A , token_type_ids=A )
UpperCAmelCase__ :List[str] = model(A , bbox=A , token_type_ids=A )
UpperCAmelCase__ :int = model(A , bbox=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self , A , A , A , A , A , A , A , ) ->Dict:
UpperCAmelCase__ :List[str] = self.num_labels
UpperCAmelCase__ :Optional[Any] = LiltForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ :Tuple = model(
A , bbox=A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , A , A , A , A , A , A , A , ) ->Union[str, Any]:
UpperCAmelCase__ :str = LiltForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ :str = model(
A , bbox=A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self ) ->Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
__a : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Optional[Any] = False
__a : int = False
def A__ ( self , A , A , A , A , A ) ->str:
return True
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :Dict = LiltModelTester(self )
UpperCAmelCase__ :Optional[int] = ConfigTester(self , config_class=A , hidden_size=37 )
def A__ ( self ) ->Optional[Any]:
self.config_tester.run_common_tests()
def A__ ( self ) ->List[str]:
UpperCAmelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def A__ ( self ) ->Optional[int]:
UpperCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ :Optional[int] = type
self.model_tester.create_and_check_model(*A )
def A__ ( self ) ->Any:
UpperCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def A__ ( self ) ->Optional[int]:
UpperCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def A__ ( self ) ->int:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ :Union[str, Any] = LiltModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def A__ ( self ) ->int:
UpperCAmelCase__ :int = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(A )
UpperCAmelCase__ :List[Any] = torch.tensor([[1, 2]] , device=A )
UpperCAmelCase__ :Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ :Union[str, Any] = model(input_ids=A , bbox=A )
UpperCAmelCase__ :Tuple = torch.Size([1, 2, 7_68] )
UpperCAmelCase__ :Any = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=A , )
self.assertTrue(outputs.last_hidden_state.shape , A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A , atol=1e-3 ) )
| 433 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCAmelCase__ = OpenAIGPTTokenizer
UpperCAmelCase__ = OpenAIGPTTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_ = dict(zip(__a , range(len(__a ) ) ) )
SCREAMING_SNAKE_CASE_ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__a ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return "lower newer", "lower newer"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE_ = """lower"""
SCREAMING_SNAKE_CASE_ = ["""low""", """er</w>"""]
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
SCREAMING_SNAKE_CASE_ = tokens + ["""<unk>"""]
SCREAMING_SNAKE_CASE_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
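        # Walkthrough (illustrative): with the toy merges above, "lower" splits into
        # l o w e r</w>; applying "l o" -> "lo", "lo w" -> "low" and "e r</w>" ->
        # "er</w>" leaves exactly the two subwords asserted here.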
def _lowercase (self , SCREAMING_SNAKE_CASE_=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
SCREAMING_SNAKE_CASE_ = """This is a simple input"""
SCREAMING_SNAKE_CASE_ = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE_ = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE_ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='''max_length''' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='''max_length''' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='''max_length''' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='''max_length''' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='''max_length''' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='''max_length''' , )
def _lowercase (self ):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( __a ):
    pass
| 626 |
from __future__ import annotations
from collections import namedtuple
def snake_case_ ( voltage : float , current : float , power : float ):
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 149 | 0 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
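# Worked note (illustrative): a connected graph has an Euler cycle iff every vertex
# has even degree, and an Euler path iff exactly two vertices have odd degree;
# check_circuit_or_path encodes these cases as 1, 2 and 3 respectively.
def _demo_euler_classification() -> None:
    triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    assert check_circuit_or_path(triangle, 10) == (1, -1)  # all degrees even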
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 384 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
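        # Worked check (illustrative): at capacity 50, taking the items of weight 20
        # and 30 (values 100 + 120) fills the sack exactly for 220, which beats any
        # combination that includes the weight-10 / value-60 item (60 + 120 = 180).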
if __name__ == "__main__":
unittest.main()
| 384 | 1 |
from __future__ import annotations
def max_sum_in_array ( array : list[int] , k : int ) -> int:
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError('Invalid Input' )
    current_sum = max_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
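# Illustrative check (uses the function as fixed above): each step drops the element
# leaving the window and adds the one entering it, so the scan is O(n) overall.
def _demo_max_sum_in_array() -> None:
    assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24  # window [3, 1, 0, 20]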
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 106 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
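# Note (illustrative): this is the standard transformers lazy-import layout; the
# names above are only declared, and the _LazyModule installed at the bottom defers
# the heavy torch imports until one of these attributes is first accessed.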
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 0 |
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 575 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CycleDiffusionPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
SCREAMING_SNAKE_CASE_ = PipelineTesterMixin.required_optional_params - {'latents'}
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCamelCase = CLIPTextModel(_snake_case )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCamelCase ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
__lowerCamelCase = image / 2 + 0.5
if str(_snake_case ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(_snake_case )
else:
__lowerCamelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__lowerCamelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
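    # Note (illustrative): unlike plain img2img, CycleDiffusion consumes both a
    # source_prompt describing the input image and a prompt describing the target,
    # and it pairs with the DDIM scheduler built above; the eta input controls the
    # scheduler's stochasticity.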
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = CycleDiffusionPipeline(**_snake_case )
__lowerCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCamelCase = self.get_dummy_inputs(_snake_case )
__lowerCamelCase = pipe(**_snake_case )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_snake_case , '''half''' ):
__lowerCamelCase = module.half()
__lowerCamelCase = CycleDiffusionPipeline(**_snake_case )
__lowerCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCamelCase = self.get_dummy_inputs(_snake_case )
__lowerCamelCase = pipe(**_snake_case )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__lowerCamelCase = init_image.resize((5_12, 5_12) )
__lowerCamelCase = '''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
__lowerCamelCase = CycleDiffusionPipeline.from_pretrained(
_snake_case , scheduler=_snake_case , safety_checker=_snake_case , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
__lowerCamelCase = '''A black colored car'''
__lowerCamelCase = '''A blue colored car'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=_snake_case , source_prompt=_snake_case , image=_snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_snake_case , output_type='''np''' , )
__lowerCamelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__lowerCamelCase = init_image.resize((5_12, 5_12) )
__lowerCamelCase = '''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
__lowerCamelCase = CycleDiffusionPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
__lowerCamelCase = '''A black colored car'''
__lowerCamelCase = '''A blue colored car'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=_snake_case , source_prompt=_snake_case , image=_snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_snake_case , output_type='''np''' , )
__lowerCamelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 575 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
lowerCAmelCase__ = {F"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( PreTrainedTokenizerFast ):
snake_case__ : int = VOCAB_FILES_NAMES
snake_case__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Any = PRETRAINED_INIT_CONFIGURATION
snake_case__ : List[Any] = FunnelTokenizer
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[str] = 2
def __init__( self : str , __lowerCAmelCase : int=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]="<unk>" , __lowerCAmelCase : Optional[Any]="<sep>" , __lowerCAmelCase : List[Any]="<pad>" , __lowerCAmelCase : List[Any]="<cls>" , __lowerCAmelCase : Union[str, Any]="<mask>" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Union[str, Any]="</s>" , __lowerCAmelCase : str=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Tuple="##" , **__lowerCAmelCase : int , ):
"""simple docstring"""
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , clean_text=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , wordpieces_prefix=_lowerCamelCase , **_lowerCamelCase , )
_lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowerCamelCase ) != tokenize_chinese_chars
):
_lowerCamelCase : int = getattr(_lowerCamelCase , normalizer_state.pop('''type''' ) )
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : Tuple = strip_accents
_lowerCamelCase : int = tokenize_chinese_chars
_lowerCamelCase : Tuple = normalizer_class(**_lowerCamelCase )
_lowerCamelCase : Dict = do_lower_case
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int]=None ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : int = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
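# Worked note (illustrative): unlike BERT, Funnel gives [CLS] its own token type id
# (cls_token_type_id = 2 above), so a pair A/B maps to [2] + [0] * len(A + sep) +
# [1] * len(B + sep) rather than starting the first segment with 0.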
| 83 |
"""simple docstring"""
from collections.abc import Callable
class a__ :
def __init__( self :Tuple , _lowerCamelCase :Callable | None = None ):
'''simple docstring'''
UpperCamelCase_ : list =[]
# Stores indexes of each item for supporting updates and deletion.
UpperCamelCase_ : dict ={}
# Stores current size of heap.
UpperCamelCase_ : Any =0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
UpperCamelCase_ : List[str] =key or (lambda _lowerCamelCase : x)
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : List[str] =int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Optional[int] =(
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] =self.arr[j], self.arr[i]
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : int =self._left(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self._right(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =i
if left is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Optional[int] =left
if right is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : List[Any] =right
return valid_parent
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Dict =self._parent(_lowerCamelCase )
while parent is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : Dict =parent, self._parent(_lowerCamelCase )
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self._get_valid_parent(_lowerCamelCase )
while valid_parent != index:
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : int =valid_parent, self._get_valid_parent(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : List[Any] =self.pos_map[item]
UpperCamelCase_ : int =[item, self.key(_lowerCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : Any =self.pos_map[item]
del self.pos_map[item]
UpperCamelCase_ : Dict =self.arr[self.size - 1]
UpperCamelCase_ : Optional[int] =index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_lowerCamelCase )] )
else:
UpperCamelCase_ : str =[item, self.key(_lowerCamelCase )]
UpperCamelCase_ : Optional[int] =self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
return self.arr[0] if self.size else None
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : int =self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
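# Usage sketch (illustrative only; the method names below assume the class has been
# de-obfuscated to its conventional insert_item / update_item / get_top API).
def _demo_heap(heap) -> None:
    heap.insert_item(5, 34)  # item 5 scored 34
    heap.insert_item(6, 31)
    heap.update_item(6, 40)  # re-score item 6; the max-heap sifts it to the top
    assert heap.get_top()[0] == 6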
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig ):
    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , lowercase_=3_21_28 , lowercase_=7_68 , lowercase_=64 , lowercase_=20_48 , lowercase_=64 , lowercase_=12 , lowercase_=3 , lowercase_=12 , lowercase_=3 , lowercase_=12 , lowercase_=8 , lowercase_=False , lowercase_=0.01 , lowercase_="float32" , lowercase_=False , lowercase_=32 , lowercase_=1_28 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.001 , lowercase_=0.001 , lowercase_=1.0 , lowercase_="relu" , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=0 , lowercase_=1 , **lowercase_ , ):
snake_case_ : Dict = vocab_size
snake_case_ : str = d_model
snake_case_ : str = d_kv
snake_case_ : Tuple = d_ff
snake_case_ : Tuple = num_sparse_encoder_layers
snake_case_ : Any = num_layers
snake_case_ : str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
snake_case_ : int = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
snake_case_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers
else:
snake_case_ : str = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
snake_case_ : Tuple = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
snake_case_ : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
snake_case_ : Optional[int] = num_heads
snake_case_ : Dict = num_experts
snake_case_ : Any = expert_capacity
snake_case_ : Tuple = router_bias
snake_case_ : Any = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
snake_case_ : int = router_dtype
snake_case_ : Tuple = router_ignore_padding_tokens
snake_case_ : Tuple = relative_attention_num_buckets
snake_case_ : Optional[Any] = relative_attention_max_distance
snake_case_ : Dict = dropout_rate
snake_case_ : int = layer_norm_epsilon
snake_case_ : str = initializer_factor
snake_case_ : str = feed_forward_proj
snake_case_ : Optional[int] = use_cache
snake_case_ : Optional[int] = add_router_probs
snake_case_ : List[str] = router_z_loss_coef
snake_case_ : List[str] = router_aux_loss_coef
snake_case_ : str = self.feed_forward_proj.split("-")
snake_case_ : Dict = act_info[-1]
snake_case_ : int = act_info[0] == "gated"
if len(lowercase_) > 1 and act_info[0] != "gated" or len(lowercase_) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
snake_case_ : Tuple = "gelu_new"
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ , )
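        # Worked check (illustrative): with the defaults num_layers = 12 and
        # num_sparse_encoder_layers = 3, the sparse step computed above is
        # 12 // 3 = 4, i.e. every fourth encoder block is a sparse MoE layer;
        # a value of 0 falls back to a fully dense stack.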
| 92 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = 1_6 ):
"""simple docstring"""
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : int = DatasetDict(
{
"train": dataset["train"].select(__SCREAMING_SNAKE_CASE ),
"validation": dataset["train"].select(__SCREAMING_SNAKE_CASE ),
"test": dataset["validation"],
} )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : str = tokenizer(examples["sentence1"], examples["sentence2"], truncation=__SCREAMING_SNAKE_CASE, max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : List[Any] = datasets.map(
__SCREAMING_SNAKE_CASE, batched=__SCREAMING_SNAKE_CASE, remove_columns=["idx", "sentence1", "sentence2"], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : Dict = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Optional[int] = 1_6
elif accelerator.mixed_precision != "no":
snake_case_ : Tuple = 8
else:
snake_case_ : Union[str, Any] = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE, padding="longest", max_length=__SCREAMING_SNAKE_CASE, pad_to_multiple_of=__SCREAMING_SNAKE_CASE, return_tensors="pt", )
# Instantiate dataloaders.
snake_case_ : Optional[int] = DataLoader(
tokenized_datasets["train"], shuffle=__SCREAMING_SNAKE_CASE, collate_fn=__SCREAMING_SNAKE_CASE, batch_size=__SCREAMING_SNAKE_CASE )
snake_case_ : int = DataLoader(
tokenized_datasets["validation"], shuffle=__SCREAMING_SNAKE_CASE, collate_fn=__SCREAMING_SNAKE_CASE, batch_size=__SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = DataLoader(
tokenized_datasets["test"], shuffle=__SCREAMING_SNAKE_CASE, collate_fn=__SCREAMING_SNAKE_CASE, batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
# Download the dataset
snake_case_ : Tuple = load_dataset("glue", "mrpc" )
# Create our splits
snake_case_ : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
snake_case_ : Optional[Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Optional[Any] = config["lr"]
snake_case_ : str = int(config["num_epochs"] )
snake_case_ : Tuple = int(config["seed"] )
snake_case_ : Optional[Any] = int(config["batch_size"] )
snake_case_ : List[Any] = evaluate.load("glue", "mrpc" )
# If the batch size is too big we use gradient accumulation
snake_case_ : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(__SCREAMING_SNAKE_CASE )
# New Code #
# Create our folds:
snake_case_ : int = kfold.split(np.zeros(datasets["train"].num_rows ), datasets["train"]["label"] )
snake_case_ : Optional[int] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ : Tuple = get_fold_dataloaders(
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : Any = AdamW(params=model.parameters(), lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
snake_case_ : List[str] = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE, num_warmup_steps=1_0_0, num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : str = accelerator.prepare(
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = outputs.loss
snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(__SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case_ : str = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE, references=__SCREAMING_SNAKE_CASE, )
snake_case_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:', __SCREAMING_SNAKE_CASE )
# New Code #
# We also run predictions on the test set at the very end
snake_case_ : Any = []
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : int = model(**__SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = outputs.logits
snake_case_ , snake_case_ : List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE, dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
snake_case_ : List[Any] = torch.cat(__SCREAMING_SNAKE_CASE, dim=0 )
snake_case_ : Any = torch.stack(__SCREAMING_SNAKE_CASE, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
snake_case_ : Tuple = metric.compute(predictions=__SCREAMING_SNAKE_CASE, references=__SCREAMING_SNAKE_CASE )
accelerator.print("Average test metrics from all folds:", __SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( ):
"""simple docstring"""
snake_case_ : Tuple = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision", type=__SCREAMING_SNAKE_CASE, default=__SCREAMING_SNAKE_CASE, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
# New Code #
parser.add_argument("--num_folds", type=__SCREAMING_SNAKE_CASE, default=3, help="The number of splits to perform across the dataset" )
snake_case_ : List[Any] = parser.parse_args()
snake_case_ : Optional[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 92 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( ProcessorMixin ):
_A : int = ["""image_processor""", """tokenizer"""]
_A : Union[str, Any] = """ChineseCLIPImageProcessor"""
_A : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , __A=None , __A=None , **__A ):
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
__UpperCAmelCase = kwargs.pop('feature_extractor' )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
__UpperCAmelCase = self.image_processor
def __call__( self , __A=None , __A=None , __A=None , **__A ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__UpperCAmelCase = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
__UpperCAmelCase = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
__UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.tokenizer.batch_decode(*__A , **__A )
def __lowerCamelCase ( self , *__A , **__A ):
return self.tokenizer.decode(*__A , **__A )
@property
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.tokenizer.model_input_names
__UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
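# Note (illustrative): the processor is a thin router; text goes to the BERT
# tokenizer, images to the ChineseCLIP image processor, and when both are given
# the image pixel_values are merged into the text encoding returned above.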
| 126 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase :
_A : Union[str, Any] = MBartConfig
_A : Tuple = {}
_A : Tuple = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest ( unittest.TestCase ):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model(self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text(self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro(self ):
        self._assert_generated_batch_equal_expected()
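Outside the test harness, the same EN→RO generation condenses to a few lines (this mirrors `translate_src_text` above and downloads the full checkpoint):

# Standalone version of the generation exercised by the slow test above.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# ['Şeful ONU declară că nu există o soluţie militară în Siria']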
| 126 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
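The conversion can also be driven from Python rather than the CLI; a hedged sketch using only the keyword arguments the script already passes (both paths are placeholders):

# Programmatic equivalent of the CLI above; paths are hypothetical.
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="./v1-5-pruned-emaonly.ckpt",  # hypothetical local checkpoint
    scheduler_type="pndm",
    extract_ema=True,
)
pipe.to(torch_dtype=torch.float16)  # optional, mirrors --half
pipe.save_pretrained("./converted-pipeline", safe_serialization=True)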
| 165 |
"""simple docstring"""
def fibonacci(n : int ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n : int ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n : int = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
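Note that `fibonacci_digits_index` rebuilds the whole sequence on every probe, making the search quadratic. An equivalent linear-time sketch keeps only the last two terms:

def fibonacci_digits_index_fast(n: int) -> int:
    # Walk the sequence once, tracking only the last two Fibonacci numbers.
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

print(fibonacci_digits_index_fast(3))  # 12, since F(12) = 144 is the first 3-digit term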
| 165 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class UpperCamelCase ( unittest.TestCase ):
    def get_model_optimizer(self , resolution=32 ):
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
@slow
    def test_training_step_equality(self ):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
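The test holds because both schedulers implement the same closed-form forward (noising) process, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps; only their reverse sampling steps differ. A quick check:

# Both schedulers noise a clean sample identically, so identical seeds
# must produce identical training losses in the test above.
import torch
from diffusers import DDIMScheduler, DDPMScheduler

ddpm = DDPMScheduler(num_train_timesteps=1000)
ddim = DDIMScheduler(num_train_timesteps=1000)
x0 = torch.randn(1, 3, 32, 32)
eps = torch.randn_like(x0)
t = torch.tensor([500])
assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t))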
| 635 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def lowerCAmelCase_ ( conductivity : float , electron_conc : float , mobility : float , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
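Usage sketch: pass zero for the unknown quantity and the solver returns its name and value, via sigma = n * e * mu (the function keeps its mangled name `lowerCAmelCase_` in this dump):

# Solve for mobility: mu = sigma / (n * e).
quantity, value = lowerCAmelCase_(conductivity=5.0, electron_conc=1e19, mobility=0)
print(quantity, value)  # mobility 3.1209...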
| 635 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config ( PretrainedConfig ):
    model_type = 'wav2vec2'
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , adapter_attn_dim=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
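The `inputs_to_logits_ratio` property multiplies the convolutional strides, i.e. the feature encoder's total downsampling. With the defaults that is 5 * 2**6:

# One logit frame per 320 raw audio samples (20 ms at 16 kHz) by default.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config()
print(config.inputs_to_logits_ratio)  # 320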
| 173 | """simple docstring"""
def solution( n : int = 1000 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 173 | 1 |
def __SCREAMING_SNAKE_CASE ( number : int ) -> int:
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
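The loop applies the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1), so the function returns the (number - 1)-th Catalan number:

# Quick check against the known sequence 1, 1, 2, 5, 14, 42, ...
for k in range(1, 7):
    print(k, __SCREAMING_SNAKE_CASE(k))  # 1 1 / 2 1 / 3 2 / 4 5 / 5 14 / 6 42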
| 17 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase):
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=9_9 , a_=3_2 , a_=5 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_6 , a_=2 , a_=0.02 , a_=4 , ) -> Tuple:
lowercase : Optional[Any] = parent
lowercase : int = batch_size
lowercase : int = seq_length
lowercase : List[str] = is_training
lowercase : str = use_attention_mask
lowercase : List[str] = use_token_type_ids
lowercase : Optional[Any] = use_labels
lowercase : Dict = vocab_size
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Optional[Any] = intermediate_size
lowercase : int = hidden_act
lowercase : Dict = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Dict = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : int = type_sequence_label_size
lowercase : Optional[Any] = initializer_range
lowercase : Union[str, Any] = num_choices
def a__ ( self ) -> Optional[int]:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any = None
if self.use_attention_mask:
lowercase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Optional[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=a_ , )
return config, input_ids, attention_mask
def a__ ( self ) -> List[str]:
lowercase : Dict = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : Dict = config_and_inputs
lowercase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest ( FlaxModelTesterMixin , unittest.TestCase):
'''simple docstring'''
_snake_case = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
def a__ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
lowercase : int = model_class_name.from_pretrained("distilbert-base-uncased" )
lowercase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a_ )
@require_flax
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def a__ ( self ) -> Union[str, Any]:
lowercase : str = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
lowercase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase : List[str] = model(a_ , attention_mask=a_ )[0]
lowercase : int = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a_ )
lowercase : List[str] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) )
| 372 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig ( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'
    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
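A minimal instantiation sketch; `max_relative_position` (default 64) is the NEZHA-specific knob for its functional relative position encoding:

# Assumes the released transformers class; defaults mirror the ones above.
from transformers import NezhaConfig

config = NezhaConfig(max_relative_position=64)
print(config.model_type, config.max_relative_position)  # nezha 64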
| 711 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours( path ):
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
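The parser above expects whitespace-separated undirected edges, one per line, and reads the first character of the file as the start node. A toy instance plus a matching invocation (file and script names are placeholders):

# Build a tiny symmetric distance file: "<node_a> <node_b> <distance>".
edges = """a b 20
a c 18
b c 10
a d 22
b d 25
c d 23
"""
with open("tabudata.txt", "w") as f:
    f.write(edges)
# Then, from the shell (iteration count and tabu-list size are free choices):
#   python tabu_search.py -f tabudata.txt -i 4 -s 3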
| 88 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester :
def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=[30, 30] , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=32 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : List[str]=37 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=10 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=8 , SCREAMING_SNAKE_CASE_ : Dict=10 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = scope
lowerCAmelCase__ = n_targets
lowerCAmelCase__ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCAmelCase__ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCAmelCase__ = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCAmelCase__ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCAmelCase__ = []
for i in range(self.batch_size ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.rand(self.n_targets , 4 , device=SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : List[Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase__ = YolosModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase__ = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(pixel_values=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCAmelCase__ = model(pixel_values=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self : Any ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int=False ):
lowerCAmelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCAmelCase__ = []
for i in range(self.model_tester.batch_size ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = torch.ones(
size=(self.model_tester.n_targets,) , device=SCREAMING_SNAKE_CASE_ , dtype=torch.long )
lowerCAmelCase__ = torch.ones(
self.model_tester.n_targets , 4 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.float )
labels.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = labels
return inputs_dict
def __snake_case ( self : Any ):
lowerCAmelCase__ = YolosModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def __snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def __snake_case ( self : int ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self : Dict ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def __snake_case ( self : int ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
# in YOLOS, the seq_len is different
lowerCAmelCase__ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ = 1
self.assertEqual(out_len + added_hidden_states , len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self : Union[str, Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# YOLOS has a different seq_length
lowerCAmelCase__ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case ( self : Optional[int] ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = YolosModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ () -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __snake_case ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self : Any ):
lowerCAmelCase__ = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(inputs.pixel_values )
# verify outputs
lowerCAmelCase__ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
# verify postprocessing
lowerCAmelCase__ = image_processor.post_process_object_detection(
SCREAMING_SNAKE_CASE_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCAmelCase__ = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [75, 75, 17, 63, 17]
lowerCAmelCase__ = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , SCREAMING_SNAKE_CASE_ ) )
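The integration test above condenses to the following standalone detection flow (same checkpoint and post-processing call):

# Standalone object-detection flow mirroring the slow test above.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label in zip(results["scores"], results["labels"]):
    print(model.config.id2label[label.item()], round(score.item(), 3))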
| 668 |
from typing import Any
def viterbi(
    observations_space : list ,
    states_space : list ,
    initial_probabilities : dict ,
    transition_probabilities : dict ,
    emission_probabilities : dict ,
) -> list:
    _validation(
        observations_space ,
        states_space ,
        initial_probabilities ,
        transition_probabilities ,
        emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space : Any ,
    states_space : Any ,
    initial_probabilities : Any ,
    transition_probabilities : Any ,
    emission_probabilities : Any ,
) -> None:
    _validate_not_empty(
        observations_space ,
        states_space ,
        initial_probabilities ,
        transition_probabilities ,
        emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty(
    observations_space : Any ,
    states_space : Any ,
    initial_probabilities : Any ,
    transition_probabilities : Any ,
    emission_probabilities : Any ,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists(observations_space : Any , states_space : Any ) -> None:
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list(_object : Any , var_name : str ) -> None:
    if not isinstance(_object , list ):
        msg = f'{var_name} must be a list'
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'{var_name} must be a list of strings'
                raise ValueError(msg )
def _validate_dicts(
    initial_probabilities : Any ,
    transition_probabilities : Any ,
    emission_probabilities : Any ,
) -> None:
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict(_object : Any , var_name : str ) -> None:
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict(_object : Any , var_name : str , value_type : type , nested : bool = False ) -> None:
    if not isinstance(_object , dict ):
        msg = f'{var_name} must be a dict'
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'{var_name} all keys must be strings'
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f'{var_name} {nested_text}all values must be {value_type.__name__}'
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
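The classic three-day health/weather example, matching the argument order enforced by `_validation`:

# Wikipedia's standard HMM example; expected path: Healthy, Healthy, Fever.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, initial, transition, emission))
# ['Healthy', 'Healthy', 'Fever']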
| 668 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base(self ):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 710 |
import itertools
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth : int = 10_001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
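A cheap sanity check on a small index:

# The 6th prime is 13 (2, 3, 5, 7, 11, 13).
assert solution(6) == 13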
| 652 | 0 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
__UpperCAmelCase: List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
__UpperCAmelCase: str = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
__UpperCAmelCase: str = key.split(""".""" )
if attributes[0] == "lm_head":
__UpperCAmelCase: List[str] = prophet
__UpperCAmelCase: Optional[int] = prophet_old
else:
__UpperCAmelCase: List[str] = prophet.prophetnet
__UpperCAmelCase: List[Any] = prophet_old.model
__UpperCAmelCase: Tuple = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__UpperCAmelCase: Union[str, Any] = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
__UpperCAmelCase: List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__UpperCAmelCase: Dict = old_model.bias
logger.info(F'''{attribute} is initialized''' )
__UpperCAmelCase: Any = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , """in_proj_weight""" ):
__UpperCAmelCase: Any = old_model.in_proj_weight.shape[0] // 3
__UpperCAmelCase: str = getattr(UpperCamelCase__ , UpperCamelCase__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__UpperCAmelCase: Dict = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__UpperCAmelCase: List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__UpperCAmelCase: Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__UpperCAmelCase: Dict = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__UpperCAmelCase: int = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__UpperCAmelCase: List[str] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__UpperCAmelCase: Union[str, Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 523 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def snake_case ( ) -> int:
lowerCamelCase : List[str] = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
lowerCamelCase : List[Any] = Dataset.from_dict(UpperCamelCase__ )
return dataset
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: List[str] )-> str:
lowerCamelCase : Union[str, Any] = get_dataset()
lowerCamelCase : Union[str, Any] = make_duplicate_clusters(__a , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : List[str] = get_dataset()
lowerCamelCase , lowerCamelCase : Optional[Any] = deduplicate_dataset(__a )
self.assertEqual(len(__a ) , 2 )
print(__a )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , __a )
| 222 | 0 |
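# --- Editor's note: hedged illustration, not part of the dataset rows around it. ---
# The ProphetNet conversion snippet above slices a fused attention `in_proj_weight`
# into separate query/key/value projections. The tensor and names below are invented
# for illustration; only the slicing pattern mirrors the conversion script.
import torch
from torch import nn

embed_dim = 4
fused_weight = torch.randn(3 * embed_dim, embed_dim)  # stands in for a fairseq-style in_proj_weight

q_w = nn.Parameter(fused_weight[:embed_dim, :])              # first embed_dim rows  -> q_proj
k_w = nn.Parameter(fused_weight[embed_dim : 2 * embed_dim])  # middle embed_dim rows -> k_proj
v_w = nn.Parameter(fused_weight[2 * embed_dim :])            # last embed_dim rows   -> v_proj

assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)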
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase = False
class __lowercase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCAmelCase__ : List[str] = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
image=A ,generator=A ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type="""numpy""" ,).images
UpperCAmelCase__ : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 194 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__UpperCAmelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase__ : int = bs[:]
UpperCAmelCase__ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ : Tuple = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = set()
UpperCAmelCase__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Optional[Any] = char
return pairs
class __lowercase ( __lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] ,A : Any ,A : Dict ,A : Optional[Any]="replace" ,A : Dict="<s>" ,A : str="</s>" ,A : str="</s>" ,A : Dict="<s>" ,A : List[str]="<unk>" ,A : Union[str, Any]="<pad>" ,A : Any="<mask>" ,A : str=False ,**A : Optional[Any] ,):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
UpperCAmelCase__ : Any = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token
UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
super().__init__(
errors=A ,bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,add_prefix_space=A ,**A ,)
with open(A ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ : Tuple = json.load(A )
UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : List[Any] = errors # how to handle errors in decoding
UpperCAmelCase__ : List[str] = bytes_to_unicode()
UpperCAmelCase__ : int = {v: k for k, v in self.byte_encoder.items()}
with open(A ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase__ : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : Any = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowercase ( self : Optional[int] ,A : Union[str, Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(A )
UpperCAmelCase__ : int = get_pairs(A )
if not pairs:
return token
while True:
UpperCAmelCase__ : str = min(A ,key=lambda A : self.bpe_ranks.get(A ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Any = 0
while i < len(A ):
try:
UpperCAmelCase__ : Optional[Any] = word.index(A ,A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : int = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : List[str] = tuple(A )
UpperCAmelCase__ : str = new_word
if len(A ) == 1:
break
else:
UpperCAmelCase__ : str = get_pairs(A )
UpperCAmelCase__ : int = """ """.join(A )
UpperCAmelCase__ : List[str] = word
return word
def __lowercase ( self : Optional[Any] ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
for token in re.findall(self.pat ,A ):
UpperCAmelCase__ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(""" """ ) )
return bpe_tokens
def __lowercase ( self : Dict ,A : int ):
'''simple docstring'''
return self.encoder.get(A ,self.encoder.get(self.unk_token ) )
def __lowercase ( self : Dict ,A : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(A )
def __lowercase ( self : Optional[Any] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = """""".join(A )
UpperCAmelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def __lowercase ( self : Optional[int] ,A : str ,A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Any = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : List[str] = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=A ,ensure_ascii=A ) + """\n""" )
UpperCAmelCase__ : Any = 0
with open(A ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase__ : Optional[int] = token_index
writer.write(""" """.join(A ) + """\n""" )
index += 1
return vocab_file, merge_file
def __lowercase ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowercase ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : Any ,A : str ,A : List[Any]=False ,**A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Dict = """ """ + text
return (text, kwargs)
def __lowercase ( self : Dict ,A : Union[Dict[str, EncodedInput], BatchEncoding] ,A : Optional[int] = None ,A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,A : Optional[int] = None ,A : Optional[bool] = None ,):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = super()._pad(
encoded_inputs=A ,max_length=A ,padding_strategy=A ,pad_to_multiple_of=A ,return_attention_mask=A ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase__ : Tuple = len(encoded_inputs["""global_attention_mask"""] ) != len(A )
if needs_to_be_padded:
UpperCAmelCase__ : List[Any] = len(A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Tuple = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 194 | 1 |
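# --- Editor's note: hedged sketch of the greedy byte-pair-encoding merge loop the
# LED tokenizer above implements. The toy merge ranks are invented for illustration;
# they are not the real LED merges file.
def bpe_merge(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:  # no known merge left
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

assert bpe_merge("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ["low", "e", "r"]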
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=13 , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Any=10 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : List[str]="divided_space_time" , __lowerCamelCase : List[Any]=None , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_frames
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = attention_type
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE = (num_frames) * self.num_patches_per_frame + 1
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE = self.num_labels
return config
def _snake_case ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = TimesformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = TimesformerForVideoClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify the logits shape
SCREAMING_SNAKE_CASE = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowerCamelCase )
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = TimesformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : str=False ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def _snake_case ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _snake_case ( self : Optional[Any] ):
pass
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : str ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TimesformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _snake_case ( self : List[Any] ):
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = self.model_tester.seq_length
SCREAMING_SNAKE_CASE = self.model_tester.num_frames
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _snake_case ( self : Any ):
def check_hidden_states_output(__lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __a ( ):
SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
SCREAMING_SNAKE_CASE = np.load(A__ )
return list(A__ )
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Union[str, Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_video()
SCREAMING_SNAKE_CASE = image_processor(video[:8] , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 16 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A : Dict = logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : List[Any] , **__lowerCamelCase : Any ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE = deprecated_arg[3:]
SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase )
logger.warning(
f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
f" {positive_arg}={kwargs[positive_arg]}" )
SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name )
SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx )
SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode )
SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Name of TPU"} , )
lowerCamelCase__ = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _snake_case ( self : Optional[int] ):
requires_backends(self , ["tf"] )
SCREAMING_SNAKE_CASE = None
if self.tpu:
try:
if self.tpu_name:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
SCREAMING_SNAKE_CASE = None
return tpu
@cached_property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" )
return strategy
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def _snake_case ( self : Optional[Any] ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def _snake_case ( self : List[str] ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _snake_case ( self : Dict ):
        return self.n_gpu > 0
| 16 | 1 |
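# --- Editor's note: hedged worked example of the token-count arithmetic in the
# TimeSformer tester above, using its defaults (image_size=10, patch_size=2,
# num_frames=2): each frame contributes a square grid of patches, plus one CLS token.
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 5 * 5 = 25
seq_length = num_frames * num_patches_per_frame + 1      # 2 * 25 + 1 = 51
assert (num_patches_per_frame, seq_length) == (25, 51)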
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def A__ ( ):
'''simple docstring'''
snake_case__ : Union[str, Any] =[randint(-1000 , 1000 ) for i in range(10 )]
snake_case__ : Any =randint(-5000 , 5000 )
return (arr, r)
__lowerCamelCase : Tuple = make_dataset()
def A__ ( _a : list[int] , _a : int ):
'''simple docstring'''
for triplet in permutations(_a , 3 ):
if sum(_a ) == target:
return tuple(sorted(_a ) )
return (0, 0, 0)
def A__ ( _a : list[int] , _a : int ):
'''simple docstring'''
arr.sort()
snake_case__ : Optional[int] =len(_a )
for i in range(n - 1 ):
snake_case__ , snake_case__ : Dict =i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def A__ ( ):
'''simple docstring'''
snake_case__ : Optional[int] ="""
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
snake_case__ : str ="""
triplet_sum1(*dataset)
"""
snake_case__ : List[Any] ="""
triplet_sum2(*dataset)
"""
snake_case__ : Union[str, Any] =repeat(setup=_a , stmt=_a , repeat=5 , number=10000 )
snake_case__ : Dict =repeat(setup=_a , stmt=_a , repeat=5 , number=10000 )
return (min(_a ), min(_a ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCamelCase : int = solution_times()
print(F"The time for naive implementation is {times[0]}.")
print(F"The time for optimized implementation is {times[1]}.")
| 448 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowercase ( unittest.TestCase ):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ):
snake_case__ : Any =parent
snake_case__ : Dict =batch_size
snake_case__ : List[Any] =seq_length
snake_case__ : str =is_training
snake_case__ : Union[str, Any] =use_attention_mask
snake_case__ : str =use_token_type_ids
snake_case__ : int =use_labels
snake_case__ : Tuple =vocab_size
snake_case__ : List[str] =hidden_size
snake_case__ : Dict =num_hidden_layers
snake_case__ : Optional[Any] =num_attention_heads
snake_case__ : List[str] =intermediate_size
snake_case__ : str =hidden_act
snake_case__ : Union[str, Any] =hidden_dropout_prob
snake_case__ : Tuple =attention_probs_dropout_prob
snake_case__ : Tuple =max_position_embeddings
snake_case__ : str =type_vocab_size
snake_case__ : Optional[Any] =type_sequence_label_size
snake_case__ : str =initializer_range
snake_case__ : List[Any] =num_choices
def lowercase__ ( self ):
snake_case__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple =None
if self.use_attention_mask:
snake_case__ : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] =None
if self.use_token_type_ids:
snake_case__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Tuple =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self ):
snake_case__ : Optional[int] =self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[Any] =config_and_inputs
snake_case__ : Dict ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowercase ( _A , unittest.TestCase ):
_a : str = True
_a : Optional[Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self ):
snake_case__ : Optional[Any] =FlaxRoFormerModelTester(self )
@slow
def lowercase__ ( self ):
for model_class_name in self.all_model_classes:
snake_case__ : Tuple =model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=a )
snake_case__ : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class _lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
snake_case__ : Optional[int] =FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case__ : Tuple =jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case__ : str =model(a )[0]
snake_case__ : List[str] =5_0_0_0_0
snake_case__ : str =(1, 6, vocab_size)
self.assertEqual(output.shape , a )
snake_case__ : Optional[int] =jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
| 448 | 1 |
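# --- Editor's note: hedged usage example of the sorted two-pointer triplet search
# shown above, written with plain names; the array and target are invented.
def two_pointer_triplet(arr, target):
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1   # sum too small: advance the left pointer
            else:
                right -= 1  # sum too large: retreat the right pointer
    return (0, 0, 0)

assert two_pointer_triplet([5, 2, 4, 1, 3], 9) == (1, 3, 5)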
def A ( lowercase__ : int ) -> str:
if number > 0:
raise ValueError("""input must be a negative integer""" )
UpperCamelCase__ :List[Any] = len(bin(lowercase__ )[3:] )
UpperCamelCase__ :str = bin(abs(lowercase__ ) - (1 << binary_number_length) )[3:]
UpperCamelCase__ :List[Any] = (
(
"""1"""
+ """0""" * (binary_number_length - len(lowercase__ ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 45 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : int ={
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =[
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 207 | 0 |
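# --- Editor's note: hedged trace of the two's-complement routine above for
# number = -5, spelling out each intermediate value.
number = -5
binary_number_length = len(bin(number)[3:])                 # bin(-5) == "-0b101" -> 3 bits
twos = bin(abs(number) - (1 << binary_number_length))[3:]   # bin(5 - 8) == "-0b11" -> "11"
result = "0b" + "1" + "0" * (binary_number_length - len(twos)) + twos
assert result == "0b1011"  # two's complement of -5 in 4 bits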
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def UpperCamelCase__ ( _lowercase : dict , _lowercase : Any , _lowercase : List[str] ) -> list[str]:
__UpperCAmelCase: Optional[int] = set()
# keep track of all the paths to be checked
__UpperCAmelCase: Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__UpperCAmelCase: int = queue.pop(0 )
# get the last node from the path
__UpperCAmelCase: Optional[Any] = path[-1]
if node not in explored:
__UpperCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__UpperCAmelCase: Union[str, Any] = list(_lowercase )
new_path.append(_lowercase )
queue.append(_lowercase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_lowercase )
# in case there's no path between the 2 nodes
return []
def UpperCamelCase__ ( _lowercase : dict , _lowercase : List[Any] , _lowercase : Dict ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__UpperCAmelCase: str = [start]
__UpperCAmelCase: int = set(_lowercase )
    # Keep tabs on distances from `start` node.
__UpperCAmelCase: List[Any] = {start: 0, target: -1}
while queue:
__UpperCAmelCase: str = queue.pop(0 )
if node == target:
__UpperCAmelCase: Optional[Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_lowercase )
queue.append(_lowercase )
__UpperCAmelCase: int = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 466 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class a :
"""simple docstring"""
def __init__( self , snake_case_ = None ):
'''simple docstring'''
if components is None:
__UpperCAmelCase: List[Any] = []
__UpperCAmelCase: Tuple = list(snake_case_ )
def __len__( self ):
'''simple docstring'''
return len(self.__components )
def __str__( self ):
'''simple docstring'''
return "(" + ",".join(map(snake_case_ , self.__components ) ) + ")"
def __add__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: int = len(self )
if size == len(snake_case_ ):
__UpperCAmelCase: str = [self.__components[i] + other.component(snake_case_ ) for i in range(snake_case_ )]
return Vector(snake_case_ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Dict = len(self )
if size == len(snake_case_ ):
__UpperCAmelCase: Dict = [self.__components[i] - other.component(snake_case_ ) for i in range(snake_case_ )]
return Vector(snake_case_ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
def __mul__( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , (float, int) ):
__UpperCAmelCase: str = [c * other for c in self.__components]
return Vector(snake_case_ )
elif isinstance(snake_case_ , snake_case_ ) and len(self ) == len(snake_case_ ):
__UpperCAmelCase: Dict = len(self )
__UpperCAmelCase: List[str] = [self.__components[i] * other.component(snake_case_ ) for i in range(snake_case_ )]
return sum(snake_case_ )
else: # error case
raise Exception("""invalid operand!""" )
def lowercase_ ( self ):
'''simple docstring'''
return Vector(self.__components )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCAmelCase: List[str] = value
def lowercase_ ( self ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__UpperCAmelCase: Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(snake_case_ ) )
def lowercase_ ( self , snake_case_ , snake_case_ = False ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = self * other
__UpperCAmelCase: Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCamelCase__ ( _lowercase : int ) -> Vector:
assert isinstance(_lowercase , _lowercase )
return Vector([0] * dimension )
def UpperCamelCase__ ( _lowercase : int , _lowercase : int ) -> Vector:
assert isinstance(_lowercase , _lowercase ) and (isinstance(_lowercase , _lowercase ))
__UpperCAmelCase: int = [0] * dimension
__UpperCAmelCase: int = 1
return Vector(_lowercase )
def UpperCamelCase__ ( _lowercase : float , _lowercase : Vector , _lowercase : Vector ) -> Vector:
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and (isinstance(_lowercase , (int, float) ))
)
return x * scalar + y
def UpperCamelCase__ ( _lowercase : int , _lowercase : int , _lowercase : int ) -> Vector:
random.seed(_lowercase )
__UpperCAmelCase: str = [random.randint(_lowercase , _lowercase ) for _ in range(_lowercase )]
return Vector(_lowercase )
class a :
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[str] = matrix
__UpperCAmelCase: Dict = w
__UpperCAmelCase: Tuple = h
def __str__( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , snake_case_ ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__UpperCAmelCase: List[str] = []
for i in range(self.__height ):
__UpperCAmelCase: List[Any] = [
self.__matrix[i][j] + other.component(snake_case_ , snake_case_ )
for j in range(self.__width )
]
matrix.append(snake_case_ )
return Matrix(snake_case_ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , snake_case_ ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__UpperCAmelCase: int = []
for i in range(self.__height ):
__UpperCAmelCase: Any = [
self.__matrix[i][j] - other.component(snake_case_ , snake_case_ )
for j in range(self.__width )
]
matrix.append(snake_case_ )
return Matrix(snake_case_ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
def __mul__( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ): # matrix-vector
if len(snake_case_ ) == self.__width:
__UpperCAmelCase: Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__UpperCAmelCase: Dict = [
self.__matrix[i][j] * other.component(snake_case_ )
for j in range(self.__width )
]
ans.change_component(snake_case_ , sum(snake_case_ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(snake_case_ , (int, float) ): # matrix-scalar
__UpperCAmelCase: Dict = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(snake_case_ , self.__width , self.__height )
return None
def lowercase_ ( self ):
'''simple docstring'''
return self.__height
def lowercase_ ( self ):
'''simple docstring'''
return self.__width
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCAmelCase: int = value
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__UpperCAmelCase: int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(snake_case_ ) ):
__UpperCAmelCase: Optional[Any] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(snake_case_ , self.__width - 1 , self.__height - 1 ).determinant()
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(snake_case_ , snake_case_ )
else:
raise Exception("""Indices out of bounds""" )
def lowercase_ ( self ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCAmelCase: Any = [
self.__matrix[0][y] * self.cofactor(0 , snake_case_ ) for y in range(self.__width )
]
return sum(snake_case_ )
def UpperCamelCase__ ( _lowercase : int ) -> Matrix:
__UpperCAmelCase: list[list[float]] = [[0] * n for _ in range(_lowercase )]
return Matrix(_lowercase , _lowercase , _lowercase )
def UpperCamelCase__ ( _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : int ) -> Matrix:
random.seed(_lowercase )
__UpperCAmelCase: list[list[float]] = [
[random.randint(_lowercase , _lowercase ) for _ in range(_lowercase )] for _ in range(_lowercase )
]
    return Matrix(_lowercase , _lowercase , _lowercase )
| 466 | 1 |
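# --- Editor's note: hedged standalone sketch of the Laplace (cofactor) expansion that
# Matrix.determinant above implements, written plainly because the dataset row uses
# placeholder method names.
def det(matrix):
    n = len(matrix)
    if n == 1:
        return matrix[0][0]
    total = 0
    for y in range(n):
        minor = [row[:y] + row[y + 1 :] for row in matrix[1:]]  # drop row 0 and column y
        total += (-1) ** y * matrix[0][y] * det(minor)
    return total

assert det([[1, 2], [3, 4]]) == -2
assert det([[2, 0, 0], [0, 3, 0], [0, 0, 4]]) == 24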
from random import randint, random
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ,__UpperCamelCase : int = 5 ,):
lowerCAmelCase_ : str = [[-1] * number_of_cells] # Create a highway without any car
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : int = max(__UpperCamelCase ,0 )
while i < number_of_cells:
lowerCAmelCase_ : Optional[Any] = (
randint(0 ,__UpperCamelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 ,max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCamelCase( __UpperCamelCase : list ,__UpperCamelCase : int ):
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Tuple = highway_now[car_index + 1 :]
for cell in range(len(__UpperCamelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(__UpperCamelCase ,-1 )
def UpperCamelCase( __UpperCamelCase : list ,__UpperCamelCase : float ,__UpperCamelCase : int ):
lowerCAmelCase_ : int = len(__UpperCamelCase )
    # Before any calculations, the highway is empty
lowerCAmelCase_ : Any = [-1] * number_of_cells
for car_index in range(__UpperCamelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowerCAmelCase_ : List[Any] = min(highway_now[car_index] + 1 ,__UpperCamelCase )
# Number of empty cell before the next car
lowerCAmelCase_ : Optional[int] = get_distance(__UpperCamelCase ,__UpperCamelCase ) - 1
# We can't have the car causing an accident
lowerCAmelCase_ : List[str] = min(next_highway[car_index] ,__UpperCamelCase )
if random() < probability:
# Randomly, a driver will slow down
lowerCAmelCase_ : Optional[Any] = max(next_highway[car_index] - 1 ,0 )
return next_highway
def UpperCamelCase( __UpperCamelCase : list ,__UpperCamelCase : int ,__UpperCamelCase : float ,__UpperCamelCase : int ):
lowerCAmelCase_ : Optional[int] = len(highway[0] )
for i in range(__UpperCamelCase ):
lowerCAmelCase_ : str = update(highway[i] ,__UpperCamelCase ,__UpperCamelCase )
lowerCAmelCase_ : Tuple = [-1] * number_of_cells
for car_index in range(__UpperCamelCase ):
lowerCAmelCase_ : str = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowerCAmelCase_ : str = (car_index + speed) % number_of_cells
# Commit the change of position
lowerCAmelCase_ : Union[str, Any] = speed
highway.append(__UpperCamelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
A__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 171 | 1 |
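# --- Editor's note: hedged one-step sketch of the Nagel-Schreckenberg update that the
# traffic simulation above encodes: accelerate, brake to the gap, (random slowdown,
# skipped here so the step is deterministic), then move on a circular road. The tiny
# highway below is invented for illustration.
def nasch_step(speeds, max_speed):
    # speeds: -1 marks an empty cell, otherwise the speed of the car in that cell
    n = len(speeds)
    nxt = [-1] * n
    for i, v in enumerate(speeds):
        if v == -1:
            continue
        v = min(v + 1, max_speed)  # rule 1: accelerate
        gap = next(d for d in range(1, n + 1) if speeds[(i + d) % n] != -1) - 1
        v = min(v, gap)            # rule 2: do not hit the car ahead
        nxt[(i + v) % n] = v       # rule 4: move (rule 3, random slowdown, omitted)
    return nxt

assert nasch_step([1, -1, -1, 0, -1, -1], max_speed=2) == [-1, -1, 2, -1, 1, -1]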
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class snake_case ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ), supervised_keys=_lowercase, )
def a__ ( self, _lowercase, _lowercase ) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()} )]
def a__ ( self, _lowercase, _lowercase ) -> Dict:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_lowercase )
class snake_case ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def a__ ( self ) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ), supervised_keys=_lowercase, )
def a__ ( self, _lowercase, _lowercase ) -> Union[str, Any]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()} )
]
def a__ ( self, _lowercase, _lowercase ) -> Dict:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_lowercase )
def _UpperCamelCase ( ) -> str:
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def _UpperCamelCase ( ) -> List[str]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class snake_case ( lowercase_ ):
"""simple docstring"""
@require_beam
def a__ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = DummyBeamDataset(cache_dir=_lowercase, beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_lowercase, builder.name, 'default', '0.0.0', f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string' )} ) )
SCREAMING_SNAKE_CASE_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows, _lowercase )
self.assertEqual(dset['train'].info.splits['train'].num_examples, _lowercase )
self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_lowercase, builder.name, 'default', '0.0.0', 'dataset_info.json' ) ) )
del dset
@require_beam
def a__ ( self ) -> List[str]:
import apache_beam as beam
SCREAMING_SNAKE_CASE_ = beam.io.parquetio.WriteToParquet
SCREAMING_SNAKE_CASE_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = DummyBeamDataset(cache_dir=_lowercase, beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
SCREAMING_SNAKE_CASE_ = partial(_lowercase, num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_lowercase, builder.name, 'default', '0.0.0', f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        _lowercase, builder.name, 'default', '0.0.0', f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string' )} ) )
SCREAMING_SNAKE_CASE_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows, _lowercase )
self.assertEqual(dset['train'].info.splits['train'].num_examples, _lowercase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ), sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(_lowercase, builder.name, 'default', '0.0.0', 'dataset_info.json' ) ) )
del dset
@require_beam
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = DummyBeamDataset(cache_dir=_lowercase )
self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare )
    @require_beam
    def test_nested_features( self ) -> Any:
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples )
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json' ) ) )
            del dset
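    # Usage sketch (the cache path is illustrative; `DummyBeamDataset` is the
    # test builder exercised above): a Beam-backed builder must be given a
    # runner, otherwise `download_and_prepare` raises `MissingBeamOptions`,
    # exactly as `test_no_beam_options` checks.
    #
    #     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
    #     builder.download_and_prepare()
    #     dset = builder.as_dataset()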
| 719 |
'''simple docstring'''
from math import ceil
def solution( n: int = 1001 ) -> int:
    total = 1
    for i in range(1 ,int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 238 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def lowerCAmelCase__ ( a__ , a__ = 16 , a__ = "bert-base-cased" ) ->Any:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained(a__ )
_UpperCamelCase = load_dataset("glue" , "mrpc" )
def tokenize_function(a__ ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a__ , max_length=a__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCamelCase = datasets.map(
a__ , batched=a__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=a__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(a__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a__ , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(a__ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCamelCase = DataLoader(
tokenized_datasets["train"] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
_UpperCamelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
return train_dataloader, eval_dataloader
def training_function( config , args ) ->Dict:
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue" , "mrpc" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
        performance_metric[f'epoch-{epoch}'] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
            json.dump(performance_metric , f )
def main( ) ->Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--performance_lower_bound" , type=float , default=None , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=3 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
    main()
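# Example invocation (the script name and flag values are illustrative):
#
#     accelerate launch performance_test.py --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results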
| 547 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url( repo_id , path , revision = None ) ->str:
    '''simple docstring'''
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
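# Usage sketch (repo and file names are illustrative): the helper only builds a
# URL string, so calling it performs no network access.
if __name__ == "__main__":
    print(hf_hub_url("lhoestq/demo1", "data/train.csv", revision="main"))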
| 547 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ) -> Optional[int]:
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device : str , seed : int=0) -> Optional[int]:
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference_batch_single_identical( self ) -> List[str]:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> None:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ) -> Any:
        """simple docstring"""
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
    def test_stable_diffusion_2( self ) -> int:
        """simple docstring"""
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
    def test_stable_diffusion_2_non_square( self ) -> int:
        """simple docstring"""
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , width=7_68 , height=5_12 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
        image = output.images
        assert image.shape == (1, 5_12, 7_68, 3)
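    # Minimal inference sketch mirroring the slow tests above (a GPU is strongly
    # recommended; setting `sag_scale=0.0` disables self-attention guidance and
    # falls back to plain classifier-free guidance):
    #
    #     pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    #     image = pipe("a photo of an astronaut", guidance_scale=7.5, sag_scale=0.75).images[0]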
| 642 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id( self ) -> None:
        """simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self ) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(len(vocab_keys) , 10_08)
    def test_vocab_size( self ) -> None:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
    def test_full_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer( self ) -> Dict:
        """simple docstring"""
        return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
    def test_picklable_without_disk( self ) -> None:
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name)
            tokenizer = XGLMTokenizer(f.name , keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers( self ) -> None:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
    def test_tokenization_base_easy_symbols( self ) -> None:
        """simple docstring"""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols( self ) -> None:
        """simple docstring"""
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration( self ) -> None:
        """simple docstring"""
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='facebook/xglm-564M' , padding=False , )
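    # Quick usage sketch for the tokenizer exercised above (expected ids taken
    # from `test_tokenization_base_easy_symbols`):
    #
    #     tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    #     assert tok.encode("Hello World!") == [2, 31227, 4447, 35]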
| 642 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset ,expected_features ):
    assert isinstance(dataset ,Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory ,text_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset ,expected_features )
@pytest.mark.parametrize(
'features' ,[
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] ,)
def test_dataset_from_text_features(features ,text_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path ,features=features ,cache_dir=cache_dir ).read()
    _check_text_dataset(dataset ,expected_features )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split(split ,text_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path ,cache_dir=cache_dir ,split=split ).read()
    _check_text_dataset(dataset ,expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' ,[str, list] )
def test_dataset_from_text_path_type(path_type ,text_path ,tmp_path ):
    if issubclass(path_type ,str ):
        path = text_path
    elif issubclass(path_type ,list ):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path ,cache_dir=cache_dir ).read()
    _check_text_dataset(dataset ,expected_features )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=("train",) ) -> List[Any]:
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
for split in splits:
lowerCamelCase_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory ,text_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path} ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset ,expected_features )
@pytest.mark.parametrize(
'features' ,[
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] ,)
def test_datasetdict_from_text_features(features ,text_path ,tmp_path ):
    cache_dir = tmp_path / 'cache'
    # The text builder keeps the single "text" column as strings unless features say otherwise
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path} ,features=features ,cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset ,expected_features )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split(split ,text_path ,tmp_path ):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path ,cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset ,expected_features ,splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
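# The public API equivalent of what `TextDatasetReader` does internally
# (the file path is illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "train.txt"})["train"]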
| 42 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _UpperCamelCase ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ) -> Any:
        '''simple docstring'''
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 1_28
        train_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=5_12 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=1_28 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            batch['''labels'''] = [
                [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x ) == 5_12 for x in inputs.input_ids )
            assert all(len(x ) == 1_28 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        train_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        val_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='''steps''' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 534 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline( Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
        requires_backends(self , '''decord''' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ) -> Optional[Any]:
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos , **kwargs ) -> Any:
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ) -> Dict:
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ) -> str:
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ) -> List[str]:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 705 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
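# A minimal sketch of validating an installed version against one of these
# pins, using `packaging` directly rather than the library's own checker:
#
#     from packaging.requirements import Requirement
#     req = Requirement(deps["torch"])          # "torch>=1.4"
#     assert req.specifier.contains("2.0.0")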
| 423 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary( file_path: str ):
    """simple docstring"""
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data( data_bits: str ):
    """simple docstring"""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary( file_path: str , to_write: str ):
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def remove_prefix( data_bits: str ):
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path: str , destination_path: str ):
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
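# Note: despite its name, `compress` runs the *decompression* pipeline above:
# read the file as a bit string, strip the length prefix, rebuild the LZW
# lexicon while decoding, then write the padded bytes back out.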
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 44 |
def optimal_merge_pattern( files: list ) -> float:
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
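# Worked example of the greedy two-way merge above: [2, 3, 4] merges (2+3)=5,
# then (5+4)=9, for a total cost of 5 + 9 = 14.
assert optimal_merge_pattern([2, 3, 4]) == 14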
if __name__ == "__main__":
import doctest
doctest.testmod()
| 443 | 0 |
"""simple docstring"""
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 282 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_2_8,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 5_0,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 1_0,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 1_0,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
def snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id="test-config" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="valid_org/test-config-org" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_dynamic_config( self ):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_config_from_string( self ):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
        self.assertEqual(n_embd , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(resid_pdrop , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(summary_type , c.summary_type , "mismatch for key: summary_type" )
    def test_config_common_kwargs_is_complete( self ):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults )}." )
    def test_from_pretrained_subfolder( self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(config )
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def test_local_versioning( self ):
        configuration = AutoConfig.from_pretrained("bert-base-cased" )
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 7_68
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , "config.4.0.0.json" ) , os.path.join(tmp_dir , "config.42.0.0.json" ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 7_68 )
    def test_repo_versioning_before( self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 7_68 )
| 282 | 1 |
from collections.abc import Iterable
from typing import Any
class Node :
    """simple docstring"""
    def __init__( self , value: int | None = None ) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Tuple ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree :
    """simple docstring"""
    def __init__( self , root: Node | None = None ) -> None:
        self.root = root
def __str__( self : Dict ) -> str:
return str(self.root )
    def __reassign_nodes( self , node: Node , new_children: Node | None ) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node: Node ) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ) -> bool:
        return self.root is None
    def __insert( self , value ) -> None:
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ) -> None:
        for value in values:
            self.__insert(value )
    def search( self , value ) -> Node | None:
        if self.empty():
            raise IndexError("""Warning: Tree is empty! please use another.""" )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node: Node | None = None ) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node: Node | None = None ) -> Node | None:
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value: int ) -> None:
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node: Node | None ) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr: list , node: Node | None ) -> None:
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k: int , node: Node ) -> int:
        arr: list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node: Node | None ):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print("""The value 6 exists""" )
    else:
        print("""The value 6 doesn't exist""" )
    if t.search(-1 ) is not None:
        print("""The value -1 exists""" )
    else:
        print("""The value -1 doesn't exist""" )
    if not t.empty():
        print("""Max Value: """ , t.get_max().value )  # type: ignore
        print("""Min Value: """ , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 63 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
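# Example invocation (script name and flag values are illustrative):
#
#     python benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128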
| 641 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path , enable_fusion=False ):
    '''simple docstring'''
    model, model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # NB: this condition reduces to '"qkv" in key' (kept as in the upstream script)
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
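# Hedged illustration of the renaming rules above on made-up keys (not a real
# CLAP checkpoint):
#   "text_branch.sequential.3.weight" -> "text_model.layers.1.linear.weight"
#       ("text_branch" is remapped first, then sequential index 3 maps to layer 3 // 3 = 1)
#   "text_projection.0.weight"        -> "text_projection.linear1.weight"
#       (projection index 0 maps to linear1, any other index to linear2)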
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
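# Quick sanity check (illustrative, not part of the original module): `d_head`
# is always derived as `d_model // n_head`, so a conflicting `d_head` kwarg
# raises immediately.
#
#   config = XLNetConfig(d_model=1024, n_head=16)
#   assert config.d_head == 64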
| 658 | 0 |
"""Min-heap that supports decrease-key, storing Node objects."""


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
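# Added note: build_heap runs in O(n); insert, remove, and decrease_key each do
# a single sift, so they run in O(log n). A small illustrative check (assumes
# the classes above):
#
#   heap = MinHeap([Node("A", 3), Node("B", 1)])
#   assert heap.peek().name == "B"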
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 90 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute exact match and F1 scores for MultiRC, grouping answers by question."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
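# Hedged illustration (made-up predictions, not part of the original metric):
# answers sharing a paragraph/question index are grouped, so `f1_m` is the
# macro-average of per-question F1 while `f1_a` scores all answers at once.
#
#   ids_preds = [
#       {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 1},
#       {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 0},
#   ]
#   evaluate_multirc(ids_preds, [1, 0])
#   # -> {"exact_match": 1.0, "f1_m": 1.0, "f1_a": 1.0}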
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 580 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
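# For example (hypothetical TPU name and zone), `accelerate tpu-config
# --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hello"` builds
# roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; echo hello" --worker all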
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
| 219 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
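    # Added note: the leading 20 in the asserted shape appears to be the number
    # of views the Shap-E pipeline renders per prompt at these settings; the
    # remaining dims are the requested frame_size and the RGB channels.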
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 219 | 1 |