| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mega,
megatron_bert,
megatron_gpt2,
mgp_str,
mluke,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mobilevitv2,
mpnet,
mra,
mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 359
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped benchmark function eagerly or as a compiled tf.function."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a random integer tensor of token ids with the given shape."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
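# Usage sketch for the benchmark class above. TensorFlowBenchmark and
# TensorFlowBenchmarkArguments are the public transformers entry points; the
# model name and sizes below are illustrative:
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   benchmark = TensorFlowBenchmark(args)
#   results = benchmark.run()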
| 317
| 0
|
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
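# Usage sketch (illustrative values; omitted arguments keep the defaults above):
#   config = TransfoXLConfig(d_model=512, n_layer=12)
#   config.max_position_embeddings  # -> -1: Transformer-XL has no fixed length limit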
| 317
| 0
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling-hash comparison."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
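# Behaviour sketch (hypothetical strings). The rolling hash makes the average
# cost O(len(text) + len(pattern)); the expensive character-by-character
# comparison only runs when the hashes collide:
#   rabin_karp("abra", "abracadabra")   # -> True
#   rabin_karp("abrax", "abracadabra")  # -> False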
| 361
|
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    # Find the primes in the first segment [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)
    # Sieve the remaining range segment by segment, reusing the small primes
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
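# Why the segment size is sqrt(n): every composite k <= n has a prime factor
# <= sqrt(n), so the primes collected from the first segment are enough to
# mark all composites in the later segments. Illustrative call:
#   sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]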
| 317
| 0
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """Update the version in one file, using the replace pattern registered for it."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str) -> None:
    """Update the version in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False) -> None:
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list() -> None:
    """Replace links to the main doc with links to the stable doc in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the __init__ of the package."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
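# Typical invocations, assuming the script lives at utils/release.py as in the
# transformers repository (the path is an assumption; the flags are defined above):
#   python utils/release.py                  # prepare a minor release
#   python utils/release.py --patch          # prepare a patch release
#   python utils/release.py --post_release   # bump to the next .dev0 version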
| 362
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """Get images list and annotations, then apply mosaic augmentation and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-format label files and collect image paths and bounding boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Stitch four images into one mosaic and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
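# How the mosaic is assembled: the four images are resized so their corners
# meet at (divid_point_x, divid_point_y); each annotation is mapped into the
# mosaic's normalized coordinates, and boxes narrower than filter_scale in
# either dimension are dropped by the filter step above.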
| 317
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
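# Minimal usage sketch (the backbone name is illustrative; any timm model id works):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   config.use_timm_backbone  # -> True, set unconditionally in __init__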
| 363
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 317
| 0
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 vocabulary as a tuple of tokens."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
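# Usage sketch showing how the nested configs chain together (illustrative values):
#   config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
#   config.esmfold_config.trunk.num_blocks                    # -> 4 (overridden)
#   config.esmfold_config.trunk.structure_module.num_blocks   # -> 8 (default)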
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
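# Usage sketch (direct instantiation; the language names must be keys of
# LANGUAGE_CODES above, and the sample sentence is illustrative):
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")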
| 317
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    # correct floor division so the result truncates toward zero
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
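# Note on the `/` branch above: Python's // floors toward negative infinity,
# so for mixed-sign operands the +1 correction truncates toward zero instead,
# e.g. -7 // 2 == -4, while the evaluator yields -3 for ["-7", "2", "/"].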
| 365
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via the sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
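# Two-pointer counting in solution(): for each prime p = prime_numbers[left],
# `right` moves down until p * prime_numbers[right] < max_number; every prime
# q between the pointers then yields a distinct semiprime p*q below max_number.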
| 317
| 0
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
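# Note: the scraper depends on IMDb's chart markup ("titleColumn" and
# "ratingColumn imdbRating" cells); if the page layout changes, the two
# find_all selectors above need updating.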
| 366
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
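# Usage sketch (PIL image in, binary PIL mask out; the checkpoint is the one
# configured on the class above):
#   tool = ImageSegmentationTool()
#   mask = tool(image, "cat")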
| 317
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.0_0085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase = "A red cartoon frog, 4k"
_lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
_lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_lowerCAmelCase = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase = pipeline(
image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
"""simple docstring"""
from __future__ import annotations
import queue
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase = queue.Queue()
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
_lowerCAmelCase = F"""Enter the left node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = left_node
q.put(snake_case_ )
_lowerCAmelCase = F"""Enter the right node of {node_found.data}: """
_lowerCAmelCase = input(snake_case_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase = TreeNode(int(snake_case_ ) )
_lowerCAmelCase = right_node
q.put(snake_case_ )
raise
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = queue.Queue()
q.put(snake_case_ )
while not q.empty():
_lowerCAmelCase = []
while not q.empty():
_lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case_ )
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case_ )
_lowerCAmelCase = n.left
# end of while means current node doesn't have left child
_lowerCAmelCase = stack.pop()
# start to traverse its right child
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase = []
_lowerCAmelCase = node
while n or stack:
while n:
stack.append(snake_case_ )
_lowerCAmelCase = n.left
_lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase = n.right
def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase = node
stacka.append(snake_case_ )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase=2 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=10 , lowerCamelCase=3 , lowerCamelCase=32 * 4 , lowerCamelCase=32 * 6 , lowerCamelCase=4 , lowerCamelCase=32 , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = is_training
_lowerCAmelCase = use_auxiliary_loss
_lowerCAmelCase = num_queries
_lowerCAmelCase = num_channels
_lowerCAmelCase = min_size
_lowerCAmelCase = max_size
_lowerCAmelCase = num_labels
_lowerCAmelCase = mask_feature_size
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
_lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
_lowerCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ (self ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = output.encoder_hidden_states
_lowerCAmelCase = output.pixel_decoder_hidden_states
_lowerCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = MaskFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_lowerCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = MaskFormerForInstanceSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(lowerCamelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(
pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__UpperCamelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__UpperCamelCase = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MaskFormerModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def A__ (self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@slow
def A__ (self ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCAmelCase = MaskFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (self.model_tester.min_size,) * 2
_lowerCAmelCase = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_SCREAMING_SNAKE_CASE ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_SCREAMING_SNAKE_CASE ),
"""class_labels""": torch.zeros(2 , 10 , device=_SCREAMING_SNAKE_CASE ).long(),
}
_lowerCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def A__ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCAmelCase = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
SCREAMING_SNAKE_CASE : Tuple = 1e-4
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def A__ (self ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
_lowerCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCAmelCase = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
_lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
_lowerCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCAmelCase = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
_lowerCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
_lowerCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
_lowerCAmelCase = inputs["""pixel_values"""].to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs["""mask_labels"""]]
_lowerCAmelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs["""class_labels"""]]
with torch.no_grad():
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = text, pattern
_lowerCAmelCase , _lowerCAmelCase = len(lowerCamelCase ), len(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ (self , lowerCamelCase ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCAmelCase = self.mismatch_in_text(lowerCamelCase )
if mismatch_index == -1:
positions.append(lowerCamelCase )
else:
_lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE : Any = '''ABAABA'''
SCREAMING_SNAKE_CASE : Optional[int] = '''AB'''
SCREAMING_SNAKE_CASE : str = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
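# A slightly larger illustrative check (added; not in the original file): the
# classic tutorial text yields matches at offsets 0, 9 and 12.
assert BoyerMooreSearch("AABAACAADAABAABA", "AABA").bad_character_heuristic() == [0, 9, 12]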
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : Optional[Any] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = ['''DeiTFeatureExtractor''']
SCREAMING_SNAKE_CASE : Any = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
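# Note (added for clarity): with this lazy-module pattern, importing the package
# stays cheap; a submodule such as modeling_deit is only loaded the first time
# one of its attributes (e.g. DeiTModel) is actually accessed.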
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> int:
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(UpperCAmelCase__ , x % y )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> int:
"""simple docstring"""
return (x * y) // greatest_common_divisor(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( snake_case_ : int = 20 ) -> int:
"""simple docstring"""
_lowerCAmelCase = 1
for i in range(1 , n + 1 ):
_lowerCAmelCase = lcm(UpperCAmelCase__ , UpperCAmelCase__ )
return g
if __name__ == "__main__":
print(F'{solution() = }')
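    # Sanity checks (added; these are the well-known answers): 2520 is the
    # smallest number divisible by 1..10, and 232792560 by 1..20.
    assert solution(10) == 2520
    assert solution() == 232792560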
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
def is_public(snake_case_ : str ) -> bool:
return not name.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
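# Illustrative usage of the HashMap under test (added for clarity; assumes the
# data_structures.hashing.hash_map API exercised above):
#
#     hm = HashMap(initial_block_size=4)
#     hm["key"] = "value"
#     assert hm["key"] == "value"
#     del hm["key"]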
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
SCREAMING_SNAKE_CASE : Any = '''bert-base-cased'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''google/pegasus-xsum'''
SCREAMING_SNAKE_CASE : Any = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SCREAMING_SNAKE_CASE : Optional[int] = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
SCREAMING_SNAKE_CASE : str = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE : Optional[int] = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE : Tuple = '''sshleifer/tiny-mbart'''
SCREAMING_SNAKE_CASE : int = '''sshleifer/tiny-marian-en-de'''
def __UpperCAmelCase ( snake_case_ : Path , snake_case_ : list ) -> str:
"""simple docstring"""
_lowerCAmelCase = """\n""".join(__a )
Path(__a ).open("""w""" ).writelines(__a )
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[Any]:
"""simple docstring"""
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__a , F"""{split}.source""" ) , __a )
_dump_articles(os.path.join(__a , F"""{split}.target""" ) , __a )
return tmp_dir
class __lowerCamelCase ( __lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = AutoTokenizer.from_pretrained(_a )
_lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCAmelCase = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
_lowerCAmelCase = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
_lowerCAmelCase = 4
_lowerCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowerCAmelCase , _lowerCAmelCase = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
_lowerCAmelCase = SeqaSeqDataset(
_a , data_dir=_a , type_path="""train""" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
_lowerCAmelCase = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowerCAmelCase = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = AutoTokenizer.from_pretrained(_a )
_lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCAmelCase = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
_lowerCAmelCase = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
_lowerCAmelCase = 4
_lowerCAmelCase = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="""train""" , max_source_length=20 , max_target_length=_a , )
_lowerCAmelCase = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
_lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowerCAmelCase = tmp_dir.joinpath("""train.source""" ).open().readlines()
_lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
_lowerCAmelCase = {x.name for x in tmp_dir.iterdir()}
_lowerCAmelCase = {x.name for x in save_dir.iterdir()}
_lowerCAmelCase = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def A__ (self ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._get_dataset(max_len=64 )
_lowerCAmelCase = 64
_lowerCAmelCase = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
_lowerCAmelCase = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
_lowerCAmelCase = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCAmelCase = []
_lowerCAmelCase = []
for batch in data_loader:
_lowerCAmelCase = batch["""input_ids"""].shape
_lowerCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowerCAmelCase = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"""too many tokens in {len(_a )} batches""" )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._get_dataset(max_len=512 )
_lowerCAmelCase = 2
_lowerCAmelCase = ds.make_sortish_sampler(_a , shuffle=_a )
_lowerCAmelCase = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCAmelCase = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
_lowerCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase , lowerCamelCase="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="""labels""" ) ) < sum(count_pad_tokens(_a , k="""labels""" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def A__ (self , lowerCamelCase=1_000 , lowerCamelCase=128 ):
'''simple docstring'''
if os.getenv("""USE_REAL_DATA""" , _a ):
_lowerCAmelCase = """examples/seq2seq/wmt_en_ro"""
_lowerCAmelCase = max_len * 2 * 64
if not Path(_a ).joinpath("""train.len""" ).exists():
save_len_file(_a , _a )
else:
_lowerCAmelCase = """examples/seq2seq/test_data/wmt_en_ro"""
_lowerCAmelCase = max_len * 4
save_len_file(_a , _a )
_lowerCAmelCase = AutoTokenizer.from_pretrained(_a )
_lowerCAmelCase = SeqaSeqDataset(
_a , data_dir=_a , type_path="""train""" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self._get_dataset()
_lowerCAmelCase = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
_lowerCAmelCase = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
_lowerCAmelCase = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
_lowerCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowerCAmelCase = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
_lowerCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(snake_case_ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
snake_case_ : int , snake_case_ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case_ )
for item in array )
_lowerCAmelCase = answer
return answer
_lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case_ , snake_case_ )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> int:
"""simple docstring"""
_lowerCAmelCase = [0] * (target + 1)
_lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(snake_case_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Any = 5
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
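    # Cross-check (added): with array [1, 2, 5] and target 5 there are exactly
    # 9 ordered combinations, and all three implementations agree on it.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )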
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class __lowerCamelCase ( lowercase__ ):
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
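# Usage note (added): instantiating SegformerFeatureExtractor still works but
# emits the FutureWarning above and behaves exactly like SegformerImageProcessor;
# new code should construct SegformerImageProcessor directly.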
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase]
SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS}
SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __UpperCAmelCase ( snake_case_ : list[int] , snake_case_ : tuple[int, ...] ) -> str | None:
"""simple docstring"""
_lowerCAmelCase = ""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
for keychar, cipherchar in zip(cycle(snake_case_ ) , snake_case_ ):
_lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case_ )
return decoded
def __UpperCAmelCase ( snake_case_ : list[int] ) -> list[str]:
"""simple docstring"""
_lowerCAmelCase = []
for key in product(snake_case_ , repeat=3 ):
_lowerCAmelCase = try_key(snake_case_ , snake_case_ )
if encoded is not None:
possibles.append(snake_case_ )
return possibles
def __UpperCAmelCase ( snake_case_ : list[str] , snake_case_ : str ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" )
_lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )]
_lowerCAmelCase = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
_lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
_lowerCAmelCase = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
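    # Round-trip sanity check (added): XOR with the same key twice is the
    # identity, so a message we encrypt ourselves must decode back.
    _key = (ord("a"), ord("b"), ord("c"))
    _sample = [ord(c) ^ k for c, k in zip("the cat", cycle(_key))]
    assert try_key(_sample, _key) == "the cat"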
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def __UpperCAmelCase ( ) -> str:
global _logger
_lowerCAmelCase = _logger or logging.getLogger(__name__ )
return _logger
class __lowerCamelCase ( __snake_case ):
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = lock_file
return None
def __str__(self ):
'''simple docstring'''
_lowerCAmelCase = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class __lowerCamelCase :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value (in seconds)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent: actually grab the lock."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent: actually release the lock."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True, if the object currently holds the file lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Releases the file lock once the lock counter reaches zero (or immediately if *force* is True)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
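
# Minimal usage sketch (the path below is hypothetical): FileLock resolves to
# the strongest locking mechanism available on the current platform, and
# acquire() returns a proxy object so the lock composes with `with`.
#
#     lock = FileLock("/tmp/example.txt.lock", timeout=10)
#     with lock.acquire(poll_intervall=0.1):
#         ...  # read or write the protected resource here
#     assert not lock.is_locked  # released when the with-block exits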
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Count the values of n below *limit* for which x**2 - y**2 - z**2 = n has
    exactly ten solutions, where x > y > z > 0 are consecutive terms of an
    arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
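
# Explanatory note: with x = a + d, y = a, z = a - d the left-hand side
# simplifies to x**2 - y**2 - z**2 = a * (4 * d - a). Every solution for n
# therefore corresponds to a divisor a of n for which d = (a + n / a) / 4 is
# a positive integer with d < a < 4 * d, which is exactly what the loops
# above count.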
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
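
# Worked example: a 10 mH inductor with a 1 uF capacitor resonates at
# f = 1 / (2 * pi * sqrt(10e-3 * 1e-6)) ~ 1591.55 Hz:
#
#     >>> resonant_frequency(inductance=10e-3, capacitance=1e-6)
#     ('Resonant frequency', 1591.5494309189535)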
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'{solution() = }')
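
# Sanity check from the problem statement: the greatest product of *four*
# adjacent digits in N is 9 * 9 * 8 * 9 = 5832, so running solution() with a
# window of 4 instead of 13 should return 5832.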
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"]),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"]),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"]),
        )
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F'{solution() = }')
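
# Worked example: 13195 = 5 * 7 * 13 * 29, so solution(13195) returns 29.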
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
if args.model_type == "roberta":
SCREAMING_SNAKE_CASE : int = RobertaForMaskedLM.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE : Tuple = '''roberta'''
elif args.model_type == "gpt2":
SCREAMING_SNAKE_CASE : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE : Dict = '''transformer'''
SCREAMING_SNAKE_CASE : Tuple = model.state_dict()
SCREAMING_SNAKE_CASE : Any = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
SCREAMING_SNAKE_CASE : Tuple = state_dict[F'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
SCREAMING_SNAKE_CASE : Tuple = F'{prefix}.embeddings.{w}.weight'
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[param_name]
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Optional[Any] = F'{prefix}.embeddings.LayerNorm.{w}'
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[param_name]
# Transformer Blocks #
SCREAMING_SNAKE_CASE : int = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : int = state_dict[
F'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[F'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Dict = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
SCREAMING_SNAKE_CASE : Any = state_dict[F'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Dict = state_dict[F'lm_head.dense.{w}']
SCREAMING_SNAKE_CASE : List[Any] = state_dict[F'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : List[Any] = state_dict[F'{prefix}.ln_f.{w}']
SCREAMING_SNAKE_CASE : Tuple = state_dict['''lm_head.weight''']
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
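
# Example invocation (illustrative output path):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/roberta_extracted.pth --vocab_transform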
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log the metrics for *split* and save them to <output_dir>/<split>_results.json."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
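
# Example invocation (illustrative; paths and hyperparameters are placeholders):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./wmt_en_ro \
#       --output_dir ./outputs --do_train --do_eval --task translation --predict_with_generate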
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs, ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate an OwlViTConfig from owlvit text and vision configuration dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
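
# Minimal usage sketch (assuming these classes are exported as in the standard
# `transformers` API): compose a full OwlViTConfig from the two sub-config dicts.
#
#     text_config = OwlViTTextConfig().to_dict()
#     vision_config = OwlViTVisionConfig().to_dict()
#     config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)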
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy")

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np", )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place using the cocktail shaker (bidirectional bubble) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F'{cocktail_shaker_sort(unsorted) = }')
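
# Worked example: cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5];
# each outer pass bubbles the largest remaining value to the right, then the
# smallest remaining value to the left.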
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on a separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 317
| 0
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns a matrix of sinusoidal timestep embeddings."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
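
# Minimal shape sketch for the two modules above (the names `t`, `emb` and
# `mlp` are illustrative; requires jax and flax):
#
#   import jax
#   t = jnp.arange(4, dtype=jnp.float32)            # (4,) timesteps
#   emb = FlaxTimesteps(dim=32).apply({}, t)        # (4, 32), no learned params
#   mlp = FlaxTimestepEmbedding(time_embed_dim=32)
#   params = mlp.init(jax.random.PRNGKey(0), emb)   # 2-layer MLP projection
#   out = mlp.apply(params, emb)                    # (4, 32)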
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Transformer-XL does not have a fixed maximum sequence length.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 317
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
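
# Usage sketch (the checkpoint name is illustrative; loading it fetches the
# sentencepiece model file this class wraps):
#
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer("Hello world!")["input_ids"]
#   text = tokenizer.decode(ids, skip_special_tokens=True)  # round-trips the input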
| 361
|
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False  # index relative to the start of the segment
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    print(sieve(10**6))
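
# Small sanity check for the segmented sieve above (expected output derived
# by hand):
#
#   >>> sieve(30)
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]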
| 317
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
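
# Sketch of how a concrete subcommand plugs into this interface (the `hello`
# command below is purely illustrative):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")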
| 362
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
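
# Worked example of the coordinate math above: with scale_x = 0.5, the mosaic
# splits the canvas at divid_point_x = 640 on a 1280-wide output. A top-left
# box with normalized xmin = 0.2 lands at 0.2 * 0.5 = 0.1 of the full canvas,
# while a top-right box with xmin = 0.2 lands at 0.5 + 0.2 * (1 - 0.5) = 0.6,
# i.e. each tile's coordinates are squeezed into its quadrant of the mosaic.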
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    main()
    print("DONE ✅")
| 317
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 317
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
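
# Usage sketch (checkpoint name illustrative; any checkpoint that ships a
# CLIP image processor plus an XLM-R tokenizer would do):
#
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a cat"], images=[Image.open("cat.png")], return_tensors="pt")
#   # batch now holds input_ids, attention_mask and pixel_values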
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
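
# Usage sketch (downloads the NLLB checkpoint on first use; the example
# sentence is illustrative):
#
#   tool = TranslationTool()
#   print(tool("Hello, how are you?", src_lang="English", tgt_lang="French"))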
| 317
| 0
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """
    Returns the number of composite integers below max_number that have
    precisely two (not necessarily distinct) prime factors.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
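
# How the two-pointer count works (worked example, max_number = 30): the
# primes below 15 are [2, 3, 5, 7, 11, 13]. For left = 0 (p = 2), right is
# moved down until p * prime_numbers[right] < 30; 2 * 13 = 26 qualifies, so
# all pairs (2, 2)...(2, 13) are counted at once. For p = 3 the window
# tightens to q <= 7, and so on: each (left, right) window counts every
# q >= p with p * q < max_number, giving 10 semiprimes below 30.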
| 317
| 0
|
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 366
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a {0, 1} mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
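
# Usage sketch (requires torch, PIL and the CLIPSeg checkpoint above; the
# file names are illustrative):
#
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("street.png"), label="car")
#   mask.save("car_mask.png")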
| 317
| 0
|
"""simple docstring"""
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print("Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ")
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | "
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(" ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    # call infix_2_postfix on the reversed equation, then reverse the Postfix
    return (infix_2_postfix("".join(reversed_infix)))[::-1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
SCREAMING_SNAKE_CASE : int = ''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
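# Worked example: for the infix expression "a+b*c", infix_2_prefix first reverses
# it to "c*b+a", infix_2_postfix turns that into "cb*a+", and reversing once more
# yields the prefix form "+a*bc".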
| 367
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None


def build_tree() -> TreeNode:
    """simple docstring"""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Queue emptied before the tree was fully built")


def pre_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
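# Non-interactive sketch (not part of the original demo): build a small tree
# directly instead of answering build_tree()'s input() prompts.
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     pre_order(root)  # prints 1,2,3,
#     in_order(root)   # prints 2,1,3,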
| 317
| 0
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """simple docstring"""
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    """simple docstring"""
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"  # restored the apparently dropped "<"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
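# Example invocation (a sketch; the script filename and output path are placeholders):
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#         --pytorch_dump_folder_path ./donut-base-finetuned-docvqa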
| 368
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        '''simple docstring'''
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
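# For the demo above the script prints [0, 3]: "AB" occurs in "ABAABA" at
# indices 0 and 3, and bad_character_heuristic() collects exactly those offsets.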
| 317
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
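# Usage sketch (follows the standard transformers config convention this file mirrors):
#
#     config = ConvBertConfig(hidden_size=384, num_attention_heads=6)
#     assert config.conv_kernel_size == 9  # defaults are kept unless overridden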
| 369
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 317
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 370
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    __UpperCamelCase = False  # original attribute name lost in the source; value preserved

    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        '''simple docstring'''
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        '''simple docstring'''
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        '''simple docstring'''
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )

            assert np.abs((expected_image - image).max()) < 1e-1
| 317
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """simple docstring"""
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
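# Launch sketch (the script filename and DeepSpeed config file are placeholders):
#     accelerate launch --config_file ds_config.yaml test_performance.py \
#         --model_name_or_path bert-base-cased --output_dir ./results --num_epochs 3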
| 371
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """simple docstring"""
    return getitem, k


def _set(k, v):
    """simple docstring"""
    return setitem, k, v


def _del(k):
    """simple docstring"""
    return delitem, k


def _run_operation(obj, fun, *args):
    """simple docstring"""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """simple docstring"""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    """simple docstring"""

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
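# Run sketch (assumes the TheAlgorithms-style layout implied by the import above):
#     pytest data_structures/hashing/tests/test_hash_map.py -q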
| 317
| 0
|
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """simple docstring"""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    """simple docstring"""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))  # str() needed: join() rejects ints
if __name__ == "__main__":
main()
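# Complexity note: pigeonhole sort runs in O(n + k) time and O(k) extra space,
# where k = max(a) - min(a) + 1, so it only pays off when the value range is
# comparable to the number of elements.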
| 350
|
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """simple docstring"""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """simple docstring"""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
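# The demo prints 9: over array [1, 2, 5] with target 5 there are nine ordered
# combinations (1+1+1+1+1, four orderings of 1+1+1+2, three orderings of 1+2+2, and 5).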
| 317
| 0
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
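# Examples (consistent with the conversion table above):
#     volume_conversion(4, "cubicmeter", "litre")  -> 4000.0
#     volume_conversion(1, "litre", "gallon")      -> 0.264172
#     volume_conversion(3, "gallon", "cubicyard")  -> 0.017814279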
| 351
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}

COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
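# Key-trial sketch: every candidate key is three lowercase letters cycled over
# the ciphertext, e.g. try_key([3, 12], (98,)) == "an" because 3 ^ ord("b") is
# ord("a") and 12 ^ ord("b") is ord("n").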
| 317
| 0
|
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
SCREAMING_SNAKE_CASE : Any = get_logger(__name__)
class __lowerCamelCase :
def __init__(self , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = (
os.path.join(__SCREAMING_SNAKE_CASE , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_lowerCAmelCase = Extractor
def A__ (self , lowerCamelCase ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_lowerCAmelCase = os.path.abspath(__SCREAMING_SNAKE_CASE )
return os.path.join(self.extract_dir , hash_url_to_filename(__SCREAMING_SNAKE_CASE ) )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(__SCREAMING_SNAKE_CASE ) and not (os.path.isdir(__SCREAMING_SNAKE_CASE ) and os.listdir(__SCREAMING_SNAKE_CASE ))
)
def A__ (self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
_lowerCAmelCase = self.extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
if not extractor_format:
return input_path
_lowerCAmelCase = self._get_output_path(__SCREAMING_SNAKE_CASE )
if self._do_extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return output_path
class __lowerCamelCase ( lowerCAmelCase_ ):
@classmethod
@abstractmethod
def A__ (cls , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
...
class __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
__UpperCamelCase = []
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
return f.read(__SCREAMING_SNAKE_CASE )
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = b"" ):
'''simple docstring'''
if not magic_number:
_lowerCAmelCase = max(len(__SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers )
try:
_lowerCAmelCase = cls.read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except OSError:
return False
return any(magic_number.startswith(__SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers )
class __lowerCamelCase ( lowerCAmelCase_ ):
@classmethod
def A__ (cls , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
return tarfile.is_tarfile(__SCREAMING_SNAKE_CASE )
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
def resolved(lowerCamelCase ) -> str:
return os.path.realpath(os.path.abspath(__SCREAMING_SNAKE_CASE ) )
def badpath(lowerCamelCase , lowerCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ).startswith(__SCREAMING_SNAKE_CASE )
def badlink(lowerCamelCase , lowerCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowerCAmelCase = resolved(os.path.join(__SCREAMING_SNAKE_CASE , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = resolved(__SCREAMING_SNAKE_CASE )
for finfo in members:
if badpath(finfo.name , __SCREAMING_SNAKE_CASE ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = tarfile.open(__SCREAMING_SNAKE_CASE )
tar_file.extractall(__SCREAMING_SNAKE_CASE , members=TarExtractor.safemembers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
tar_file.close()
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x1F\x8B"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with gzip.open(__SCREAMING_SNAKE_CASE , """rb""" ) as gzip_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [
b"""PK\x03\x04""",
b"""PK\x05\x06""", # empty archive
b"""PK\x07\x08""", # spanned archive
]
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = b"" ):
'''simple docstring'''
if super().is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as fp:
_lowerCAmelCase = _EndRecData(__SCREAMING_SNAKE_CASE )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowerCAmelCase = fp.read(__SCREAMING_SNAKE_CASE ) # CD is where we expect it to be
if len(__SCREAMING_SNAKE_CASE ) == sizeCentralDir:
_lowerCAmelCase = struct.unpack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , """r""" ) as zip_file:
zip_file.extractall(__SCREAMING_SNAKE_CASE )
zip_file.close()
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with lzma.open(__SCREAMING_SNAKE_CASE ) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = rarfile.RarFile(__SCREAMING_SNAKE_CASE )
rf.extractall(__SCREAMING_SNAKE_CASE )
rf.close()
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x28\xb5\x2F\xFD"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
_lowerCAmelCase = zstd.ZstdDecompressor()
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as ifh, open(__SCREAMING_SNAKE_CASE , """wb""" ) as ofh:
dctx.copy_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x42\x5A\x68"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with bza.open(__SCREAMING_SNAKE_CASE , """rb""" ) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with pyazr.SevenZipFile(__SCREAMING_SNAKE_CASE , """r""" ) as archive:
archive.extractall(__SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x04\x22\x4D\x18"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__SCREAMING_SNAKE_CASE , """rb""" ) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
__UpperCamelCase = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A__ (cls ):
'''simple docstring'''
return max(
len(__SCREAMING_SNAKE_CASE )
for extractor in cls.extractors.values()
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
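# The classes above are dispatched by sniffing a file's leading "magic number"
# bytes. A minimal, self-contained sketch of that idea (illustrative only: the
# byte signatures are taken from the extractors above, but `sniff_format`
# itself is not part of the original module):
MAGIC_NUMBERS = {
    b"\xfd\x37\x7a\x58\x5a\x00": "xz",
    b"\x28\xb5\x2f\xfd": "zstd",
    b"\x42\x5a\x68": "bz2",
    b"\x37\x7a\xbc\xaf\x27\x1c": "7z",
    b"\x04\x22\x4d\x18": "lz4",
}


def sniff_format(path):
    # Read just enough bytes to match the longest known signature.
    with open(path, "rb") as f:
        header = f.read(max(len(magic) for magic in MAGIC_NUMBERS))
    for magic, fmt in MAGIC_NUMBERS.items():
        if header.startswith(magic):
            return fmt
    return None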
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
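# Sanity check for the algebra behind the solver (an added sketch, not part of
# the original solution): with x = a + d, y = a, z = a - d in arithmetic
# progression, x**2 - y**2 - z**2 == a * (4 * d - a), which is why
# first_term + n / first_term must be divisible by 4 before d can be recovered.
def _check_ap_identity(a, d):
    x, y, z = a + d, a, a - d
    return x**2 - y**2 - z**2 == a * (4 * d - a)


assert all(_check_ap_identity(a, d) for a in range(1, 20) for d in range(1, 20))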
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowerCamelCase__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCamelCase__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCamelCase__ )
return parser.parse_args()
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
_lowerCAmelCase = parse_args()
# Import training_script as a module.
_lowerCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCAmelCase = script_fpath.stem
_lowerCAmelCase = importlib.import_module(lowerCamelCase__ )
# Patch sys.argv
_lowerCAmelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
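# Example invocation (illustrative; the training script name and its arguments
# are placeholders, not part of this launcher):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the training script as a module and spawns its `_mp_fn`
# once per TPU core, so any script launched this way must define `_mp_fn(index)`.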
"""simple docstring"""
from functools import reduce
SCREAMING_SNAKE_CASE : int = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __UpperCAmelCase ( snake_case_ : str = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) )
for i in range(len(snake_case_ ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
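# An equivalent formulation (an added sketch, not part of the original
# solution): compute each 13-digit window product with math.prod instead of
# reducing over digit strings.
import math


def solution_prod(n: str = N) -> int:
    return max(math.prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))


assert solution_prod() == solution()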
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
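# A minimal end-to-end usage sketch of the pipeline exercised above
# (illustrative only; the checkpoint is one of those used in the slow tests,
# and the image file name is a placeholder):
#
#     from transformers import pipeline
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     result = dqa(image="invoice.png", question="What is the invoice number?", top_k=1)
#     # -> [{"score": ..., "answer": ..., "start": ..., "end": ...}]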
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int:
"""simple docstring"""
try:
_lowerCAmelCase = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
while n % i == 0:
_lowerCAmelCase = i
n //= i
i += 1
if n > 1:
_lowerCAmelCase = n
return int(snake_case_ )
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """
    Helper function for reproducible behavior: sets the seed in `random`, `numpy` and `torch`.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
@torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Copy current averaged parameters into the given collection of parameters.
        """
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        """
        Move internal buffers of the ExponentialMovingAverage to `device`.
        """
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        """
        Returns the state of the ExponentialMovingAverage as a dict.
        """
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Save the current parameters for restoring later.
        """
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Restore the parameters stored with the `store` method.
        """
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """
        Loads the ExponentialMovingAverage state.
        """
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
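# A minimal usage sketch of the EMA helper above (illustrative only; the tiny
# linear model and the bare loop stand in for a real optimizer step):
if __name__ == "__main__":
    _model = torch.nn.Linear(4, 4)
    _ema = EMAModel(_model.parameters(), decay=0.999)
    for _ in range(10):
        # ... an optimizer would update `_model`'s weights here ...
        _ema.step(_model.parameters())  # pull the shadow weights toward the live ones
    _ema.copy_to(_model.parameters())  # load the EMA weights back for evaluation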
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Logs metrics and saves them to `output_dir/{split}_results.json`.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
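# Example invocation (illustrative; paths, checkpoint and hyperparameters are
# placeholders, not taken from this script):
#
#     python finetune_trainer.py \
#         --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#         --data_dir ./cnn_dm --output_dir ./output \
#         --do_train --do_eval --task summarization --predict_with_generate
#
# On TPU, launch the same command through xla_spawn.py, which calls the
# `_mp_fn` defined above once per core.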
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(limit: int = 1000000) -> int:
    """
    Sum Euler's totient function phi(n) for 2 <= n <= limit, sieving the primes
    first and then applying Euler's product formula to every multiple.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'{solution() = }')
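# The sieve above relies on Euler's product formula: phi(n) = n * prod(1 - 1/p)
# over the distinct primes p dividing n. A small cross-check (an added sketch,
# not part of the original solution):
def _phi_naive(n):
    from math import gcd

    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


# phi(12) = 12 * (1 - 1/2) * (1 - 1/3) = 4
assert _phi_naive(12) == 4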
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for token classification.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """
    A single set of features of data.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0    0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __lowerCamelCase ( lowerCamelCase__ ):
__UpperCamelCase = 42
__UpperCamelCase = nn.CrossEntropyLoss().ignore_index
def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase=False , lowerCamelCase = Split.train , ):
'''simple docstring'''
_lowerCAmelCase = os.path.join(
lowercase__ , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(lowercase__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(lowercase__ ):
if os.path.exists(lowercase__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
_lowerCAmelCase = torch.load(lowercase__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
_lowerCAmelCase = token_classification_task.read_examples_from_file(lowercase__ , lowercase__ )
# TODO clean up all this to leverage built-in features of tokenizers
_lowerCAmelCase = token_classification_task.convert_examples_to_features(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , lowercase__ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , lowerCamelCase ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase :
__UpperCamelCase = 42
__UpperCamelCase = -100
def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase=False , lowerCamelCase = Split.train , ):
'''simple docstring'''
_lowerCAmelCase = token_classification_task.read_examples_from_file(lowercase__ , lowercase__ )
# TODO clean up all this to leverage built-in features of tokenizers
_lowerCAmelCase = token_classification_task.convert_examples_to_features(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_lowerCAmelCase = tf.data.Dataset.from_generator(
lowercase__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_lowerCAmelCase = tf.data.Dataset.from_generator(
lowercase__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , lowerCamelCase ):
'''simple docstring'''
return self.features[i]
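# Usage sketch (illustrative only; `NER` and the label list below are hypothetical
# placeholders, and the dataset class is referred to by its upstream name,
# TokenClassificationDataset):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   dataset = TokenClassificationDataset(
#       token_classification_task=NER(), data_dir="./data", tokenizer=tokenizer,
#       labels=["O", "B-PER", "I-PER"], model_type="bert", max_seq_length=128,
#   )
#   features = dataset[0]  # an InputFeatures with input_ids, attention_mask, label_ids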
| 358
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
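# Worked example: cocktail_shaker_sort([4, 5, 2, 1, 2]) alternates a backward pass
# (pulling small items left) with a forward pass (pushing large items right) and
# returns [1, 2, 2, 4, 5]; an already-sorted input exits after one swap-free sweep.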
| 317
| 0
|
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(""",""")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(len(grid))]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, len(grid)):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, len(grid)):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
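# Worked example of the recurrence dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]):
# for grid [[1, 3], [2, 4]] the table fills as dp = [[1, 4], [3, 7]], so the minimal
# right/down path sum is 7 (1 -> 2 -> 4).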
if __name__ == "__main__":
print(F'{solution() = }')
| 359
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.int32 )
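# For example, with batch_size=2, sequence_length=4 and vocab_size=100 this returns a
# (2, 4) int32 tensor of token ids drawn uniformly from [0, 99].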
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 more times to stabilize compilation for TPU
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
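# Note: _LazyModule defers the heavy torch/tokenizers imports above until an attribute
# is first accessed, so `from transformers.models.mvp import MvpConfig` only triggers
# the actual `configuration_mvp` import at lookup time.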
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self ):
        '''simple docstring'''
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 0
|
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """simple docstring"""
    return sum(int(x) for x in str(factorial(num)))
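# Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so
# solution(10) returns 27.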
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 361
|
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """simple docstring"""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
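# Worked example: sieve(30) marks [2, 3, 5] in the base segment of size sqrt(30) ~ 5,
# then strikes their multiples in the windows [6..10], [11..15], ..., returning
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].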
if __name__ == "__main__":
    print(sieve(10**6))
| 317
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Optional[Any] = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : Tuple = (0.4, 0.6) # if height or width is lower than this scale, drop it.
SCREAMING_SNAKE_CASE : int = 1 / 1_0_0
SCREAMING_SNAKE_CASE : str = ""
SCREAMING_SNAKE_CASE : Tuple = ""
SCREAMING_SNAKE_CASE : int = ""
SCREAMING_SNAKE_CASE : Optional[Any] = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase )
for index in range(__lowerCAmelCase ):
_lowerCAmelCase = random.sample(range(len(__lowerCAmelCase ) ) , 4 )
_lowerCAmelCase = update_image_and_anno(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , filter_scale=__lowerCAmelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(__lowerCAmelCase )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Tuple ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCAmelCase ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(__lowerCAmelCase , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(__lowerCAmelCase ):
_lowerCAmelCase = all_img_list[index]
path_list.append(__lowerCAmelCase )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(__lowerCAmelCase )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(__lowerCAmelCase , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(__lowerCAmelCase , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(__lowerCAmelCase , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
__lowerCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : Any ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
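# Note on the annotation format assumed by the dataset loader above (get_dataset at
# its call sites): each line of a label .txt file is YOLO-style
# "class x_center y_center width height" with coordinates normalized to [0, 1], and it
# is converted to corner form [class, xmin, ymin, xmax, ymax].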
| 362
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
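# Worked example of the top-right remapping above: with scale_x = 0.5, a normalized
# xmin of 0.2 in the source image lands at 0.5 + 0.2 * (1 - 0.5) = 0.6 on the mosaic
# canvas, i.e. inside the right half.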
| 317
| 0
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCamelCase ( _A ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_attention_heads""" ) )
class __lowerCamelCase :
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=3 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=16 , lowerCamelCase=[128, 256, 384] , lowerCamelCase=[4, 6, 8] , lowerCamelCase=[2, 3, 4] , lowerCamelCase=[16, 16, 16] , lowerCamelCase=0 , lowerCamelCase=[2, 2, 2] , lowerCamelCase=[2, 2, 2] , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=2 , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = kernel_size
_lowerCAmelCase = stride
_lowerCAmelCase = padding
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = depths
_lowerCAmelCase = key_dim
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = patch_size
_lowerCAmelCase = attention_ratio
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = initializer_range
_lowerCAmelCase = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = num_labels
_lowerCAmelCase = initializer_range
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def A__ (self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = LevitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
_lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = (self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase = image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_lowerCAmelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = LevitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
_lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _A , _A , unittest.TestCase ):
__UpperCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = LevitModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def A__ (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ (self ):
'''simple docstring'''
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
_lowerCAmelCase = (self.model_tester.image_size, self.model_tester.image_size)
_lowerCAmelCase , _lowerCAmelCase = image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_lowerCAmelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def A__ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
_lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCAmelCase = False
_lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
_lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
_lowerCAmelCase = problem_type["""title"""]
_lowerCAmelCase = problem_type["""num_labels"""]
_lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
_lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
_lowerCAmelCase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
_lowerCAmelCase = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as warning_list:
_lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def A__ (self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = LevitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def A__ (self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
_lowerCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
_lowerCAmelCase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
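# The integration test above checks only the first three of the 1000 logits against
# reference values with atol=1e-4, a tolerance loose enough to absorb small numerical
# differences between devices while still catching a wrong checkpoint or preprocessing.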
| 363
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
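# With `pytest --make-reports=<id>` (the flag registered by pytest_addoption_shared),
# pytest_terminal_summary_main writes the duration/failure/warning summaries to
# per-id report files instead of only printing them to the terminal.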
| 317
| 0
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self , other ):
        '''simple docstring'''
        return self[-1] < other[-1]
    def __eq__(self , other ):
        '''simple docstring'''
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
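# Worked example: [5, 1, 4, 2] deals into decreasing stacks [5, 1] and [4, 2]; merging
# the reversed stacks [1, 5] and [2, 4] with heapq.merge yields [1, 2, 4, 5].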
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, "
        "which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian' or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
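# A minimal usage sketch (hypothetical input text; assumes the NLLB checkpoint
# can be downloaded and the language names match keys of LANGUAGE_CODES above):
#
#     translator = TranslationTool()
#     print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))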
| 317
| 0
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, `asdict` still includes the deprecated dataset_name field
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 365
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes) below max_number (Project Euler 187)."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
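    # Quick hand-verified sanity check of the sieve on a small bound:
    assert calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]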
| 317
| 0
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    Decide whether ``a`` can be turned into ``b`` by capitalizing some of its
    lowercase letters and deleting all remaining lowercase letters
    (dynamic programming over prefixes of both strings).

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
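# A minimal usage sketch (hypothetical image path; assumes the CLIPSeg
# checkpoint can be downloaded and the vision extras are installed):
#
#     from PIL import Image
#     tool = ImageSegmentationTool()
#     mask = tool(image=Image.open("cats.png"), label="cat")
#     mask.save("cat_mask.png")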
| 317
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree in level order."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise ValueError("The queue emptied before the tree was returned")
def pre_order(node: TreeNode) -> None:
    """Root -> Left -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Left -> Root -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Left -> Right -> Root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one line per level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:  # enqueue the next level
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node: TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
    print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
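    # Hand-checked example: for the tree 1(2(4, 5), 3) entered at the prompts,
    # the traversals above print
    #   pre-order:   1,2,4,5,3,
    #   in-order:    4,2,5,1,3,
    #   post-order:  4,5,2,3,1,
    #   level-order: 1,2,3,4,5,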
| 317
| 0
|
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
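    # Hand-checked example: decrypt("XBT JU B DBU J TBX") prints 26 candidate
    # lines, and key #1 yields the plaintext "WAS IT A CAT I SAW".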
| 368
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch at this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
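# Hand-checked: with text "ABAABA" and pattern "AB" the pattern matches at
# indices 0 and 3, so this script prints:
#     Pattern found in following positions:
#     [0, 3]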
| 317
| 0
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: after each pass the largest remaining element is
    in place, so the next pass can ignore the final position.

    >>> bubble_sort([27, 33, 28, 4, 2, 26, 13, 35, 8, 14])
    [2, 4, 8, 13, 14, 26, 27, 28, 33, 35]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 317
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None,
                 do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 370
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2,
            in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
| 317
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25,
                 min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6",
                 classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True,
                 num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def A__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def A__ (self ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 371
|
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_are_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 317
| 0
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
SCREAMING_SNAKE_CASE : Tuple = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
SCREAMING_SNAKE_CASE : Tuple = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
SCREAMING_SNAKE_CASE : Optional[Any] = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy together with the binary F1 score."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval under cosine distance."""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCamelCase , lowerCamelCase )}
elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCamelCase , lowerCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 350
|
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: count the ordered combinations of array items that sum to target."""
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down recursion with memoization over the remaining target."""
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulation: dp_array[i] counts the ordered combinations summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
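    # Hand-checked: with array [1, 2, 5] and target 5 there are 9 ordered
    # combinations (1+1+1+1+1, four orderings of 1+1+1+2, three of 1+2+2,
    # and 5 itself), so all three variants should agree:
    assert combination_sum_iv_dp_array(n, array, target) == 9
    assert combination_sum_iv_bottom_up(n, array, target) == 9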
| 317
| 0
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
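    # The query point [4.4, 3.1, 1.3, 1.4] sits in the Iris-setosa cluster
    # (petal length 1.3 cm), so the call above should print "setosa".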
| 351
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decode the ciphertext with a repeating XOR key; return None if any decoded character is invalid."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep decodings made only of valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( snake_case_ : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = Path(snake_case_ ).parent.joinpath(snake_case_ ).read_text(encoding="""utf-8""" )
_lowerCAmelCase = [int(snake_case_ ) for number in data.strip().split(""",""" )]
_lowerCAmelCase = filter_valid_chars(snake_case_ )
for common_word in COMMON_WORDS:
_lowerCAmelCase = filter_common_word(snake_case_ , snake_case_ )
if len(snake_case_ ) == 1:
break
_lowerCAmelCase = possibles[0]
return sum(ord(snake_case_ ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 0
|
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE : int = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check whether ``dataset_size`` is smaller than ``config.IN_MEMORY_MAX_SIZE``."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
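# A minimal sketch of how the checksum helpers combine (hypothetical URL and
# local path; `expected` would normally come from the dataset's metadata):
#
#     url = "https://example.com/data.csv"
#     expected = {url: {"num_bytes": 1024, "checksum": "..."}}
#     recorded = {url: get_size_checksum_dict("/tmp/data.csv")}
#     verify_checksums(expected, recorded)  # raises NonMatchingChecksumError on mismatch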
| 352
|
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count how many n below the limit have exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 0
|
"""simple docstring"""
import os
def solution() -> int:
    """Sum of the name scores of the alphabetically sorted names in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 353
|
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number N."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(F'{solution() = }')
| 317
| 0
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
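
# Minimal usage sketch (illustrative addition; requires an interactive terminal):
#   menu = BulletMenu("Choose a compute backend:", ["cpu", "cuda", "mps"])
#   selected_index = menu.run(default_choice=0)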
| 354
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Project Euler 3: return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F'{solution() = }')
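    # Quick check from the Project Euler 3 statement (illustrative addition, not
    # part of the original file): the prime factors of 13195 are 5, 7, 13 and 29.
    assert solution(13195) == 29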
| 317
| 0
|
"""simple docstring"""
from manim import *
class __lowerCamelCase(Scene):  # manim scenes must subclass Scene and implement construct()
    def construct(self):
'''simple docstring'''
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
_lowerCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
_lowerCAmelCase = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
_lowerCAmelCase = [mem.copy() for i in range(1 )]
_lowerCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.align_to(lowerCamelCase_ , lowerCamelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase_ )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , )
_lowerCAmelCase = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.""" , font_size=24 , )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=2.5 ) , Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) )
self.add(lowerCamelCase_ )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(lowerCamelCase_ ):
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase_ )
cpu_target.generate_target()
_lowerCAmelCase = 0.46 / 4
_lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase_ , buff=0.0 )
cpu_targs.append(lowerCamelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) )
second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) )
self.play(*lowerCamelCase_ )
self.play(*lowerCamelCase_ )
self.wait()
| 355
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1_024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split: str, metrics: dict, output_dir: str) -> None:
    """Log a metrics dict for the given split ("train", "val" or "test") and save it as JSON."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
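# For example (illustrative call, not part of the original script):
# handle_metrics("val", {"val_loss": 1.23}, "output/") logs the metric and writes
# output/val_results.json.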
def main():
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 317
| 0
|
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 317
| 0
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`, optionally
    only between `start_prompt` and `end_prompt` (what comes before/after is kept as
    the first/last block)."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap `key` (a function mapping an object to a string) so that comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: all-uppercase constants
    first, classes second, functions last. `key` optionally maps an element to a string."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
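# Illustrative behaviour (not part of the original script):
#   sort_objects(["load_tf_weights_in_funnel", "FunnelModel", "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP"])
# returns the constant first, then the class, then the function.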
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the objects listed between brackets in a single import statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
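# Illustrative behaviour (not part of the original script): a statement such as
#   _import_structure["models.funnel"].extend(["FunnelModel", "FunnelConfig", "FUNNEL_MAP"])
# has its bracketed list rewritten as ["FUNNEL_MAP", "FunnelConfig", "FunnelModel"].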
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` imports defined in an `__init__.py` file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` of the repo and raise if any would change."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
SCREAMING_SNAKE_CASE : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
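# Typical invocations (illustrative; the script file name is an assumption):
#   python custom_init_isort.py --check_only   # fail in CI if any __init__.py is unsorted
#   python custom_init_isort.py                # e.g. via `make style`, rewrite files in place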
| 357
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Tuple = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place using cocktail shaker (bidirectional bubble) sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble smaller elements toward the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble larger elements toward the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 317
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
SCREAMING_SNAKE_CASE : int = R'''\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'''
@add_start_docstrings(SCREAMING_SNAKE_CASE)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a config from a question encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 359
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
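# Illustrative use (not part of the original file): wrapping a forward pass so it runs
# eagerly or as an XLA-compiled tf.function depending on the benchmark arguments:
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)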
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
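# e.g. random_input_ids(batch_size=2, sequence_length=8, vocab_size=100) returns an
# int32 tf.Tensor of shape (2, 8) with token ids drawn uniformly from [0, 100) —
# illustrative note, not part of the original file.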
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
"""simple docstring"""
import operator as op
SCREAMING_SNAKE_CASE : str = "scaler.pt"
SCREAMING_SNAKE_CASE : List[str] = "pytorch_model"
SCREAMING_SNAKE_CASE : List[str] = "random_states"
SCREAMING_SNAKE_CASE : Optional[int] = "optimizer"
SCREAMING_SNAKE_CASE : Any = "scheduler"
SCREAMING_SNAKE_CASE : Optional[int] = "pytorch_model.bin"
SCREAMING_SNAKE_CASE : List[str] = "pytorch_model.bin.index.json"
SCREAMING_SNAKE_CASE : Optional[int] = "model.safetensors"
SCREAMING_SNAKE_CASE : int = "model.safetensors.index.json"
SCREAMING_SNAKE_CASE : Optional[Any] = "1.10.2"
SCREAMING_SNAKE_CASE : Optional[Any] = "py38"
SCREAMING_SNAKE_CASE : Any = "4.17.0"
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
SCREAMING_SNAKE_CASE : Tuple = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
SCREAMING_SNAKE_CASE : Dict = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
SCREAMING_SNAKE_CASE : Union[str, Any] = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
SCREAMING_SNAKE_CASE : Tuple = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
SCREAMING_SNAKE_CASE : int = "2.0.1"
SCREAMING_SNAKE_CASE : int = ["pdsh", "standard", "openmpi", "mvapich"]
SCREAMING_SNAKE_CASE : str = ["default", "reduce-overhead", "max-autotune"]
SCREAMING_SNAKE_CASE : Tuple = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
SCREAMING_SNAKE_CASE : Optional[int] = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
SCREAMING_SNAKE_CASE : List[Any] = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
SCREAMING_SNAKE_CASE : Tuple = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'transfo-xl'
__UpperCamelCase = ['mems']
__UpperCamelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
    def max_position_embeddings(self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE : Any = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE : List[str] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
_lowerCAmelCase = bs[:]
_lowerCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_a )
cs.append(2**8 + n )
n += 1
_lowerCAmelCase = [chr(_a ) for n in cs]
return dict(zip(_a , _a ) )
def __UpperCAmelCase ( snake_case_ : Any ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = set()
_lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase = char
return pairs
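# What `get_pairs` above computes: the set of adjacent symbol pairs in a word,
# which the BPE loop repeatedly queries to find the highest-ranked merge. A
# worked example in plain Python, independent of the tokenizer class below.
word = ("h", "e", "l", "l", "o")
adjacent_pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert adjacent_pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}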
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase="replace" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=False , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase = json.load(lowerCamelCase_ )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase = errors # how to handle errors in decoding
_lowerCAmelCase = bytes_to_unicode()
_lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_lowerCAmelCase = {}
_lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ (self ):
'''simple docstring'''
return len(self.encoder )
def A__ (self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase = tuple(lowerCamelCase_ )
_lowerCAmelCase = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
_lowerCAmelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase = bigram
_lowerCAmelCase = []
_lowerCAmelCase = 0
while i < len(lowerCamelCase_ ):
try:
_lowerCAmelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase = tuple(lowerCamelCase_ )
_lowerCAmelCase = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
_lowerCAmelCase = get_pairs(lowerCamelCase_ )
_lowerCAmelCase = """ """.join(lowerCamelCase_ )
_lowerCAmelCase = word
return word
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = []
for token in re.findall(self.pat , lowerCamelCase_ ):
_lowerCAmelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = """""".join(lowerCamelCase_ )
_lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
_lowerCAmelCase = 0
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
_lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ (self , lowerCamelCase , lowerCamelCase=False , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
_lowerCAmelCase = """ """ + text
return (text, kwargs)
def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ):
'''simple docstring'''
_lowerCAmelCase = super()._pad(
encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
_lowerCAmelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCAmelCase = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
_lowerCAmelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
_lowerCAmelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCAmelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCAmelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
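# The `_pad` override above pads `global_attention_mask` with -1 rather than 0,
# because for LED a 0 entry already means "local attention" while -1 marks
# positions that should not be attended at all. A minimal sketch of the
# right-padding branch with illustrative values:
global_attention_mask = [1, 0, 0]  # global attention on the first token only
target_length = 5
difference = target_length - len(global_attention_mask)
padded = global_attention_mask + [-1] * difference
assert padded == [1, 0, 0, -1, -1]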
"""simple docstring"""
import math
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
_lowerCAmelCase = [True] * (end + 1)
_lowerCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
_lowerCAmelCase = False
start += 1
prime += in_prime
_lowerCAmelCase = end + 1
_lowerCAmelCase = min(2 * end , snake_case_ )
while low <= n:
_lowerCAmelCase = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
_lowerCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase = high + 1
_lowerCAmelCase = min(high + end , snake_case_ )
return prime
print(sieve(1_0**6))
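# The segmented sieve above should agree with a plain Sieve of Eratosthenes on
# small inputs. A self-contained cross-check, written inline so it does not
# depend on the (obfuscated) function name above:
def simple_sieve(n: int) -> list[int]:
    flags = [True] * (n + 1)
    primes = []
    for p in range(2, n + 1):
        if flags[p]:
            primes.append(p)
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return primes

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]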
"""simple docstring"""
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
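# The comprehension above searches for the unique Pythagorean triple with
# a + b + c = 1000; the well-known answer is (200, 375, 425), whose product is
# 31_875_000. A direct arithmetic check of that triple:
a, b, c = 200, 375, 425
assert a + b + c == 1_000
assert a * a + b * b == c * c
assert a * b * c == 31_875_000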
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6)  # If the height or width of a box is below this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
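# The annotation rewrite in `main` above converts corner-format boxes
# (label, xmin, ymin, xmax, ymax) into YOLO center format
# (label, x_center, y_center, width, height), all in relative coordinates.
# A minimal sketch of that conversion; the helper name is illustrative.
def corners_to_yolo(anno: list) -> list:
    label, xmin, ymin, xmax, ymax = anno
    width, height = xmax - xmin, ymax - ymin
    return [label, xmin + width / 2, ymin + height / 2, width, height]

assert corners_to_yolo([0, 0.25, 0.25, 0.75, 0.75]) == [0, 0.5, 0.5, 0.5, 0.5]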
"""simple docstring"""
from typing import List
import numpy as np
def __UpperCAmelCase ( snake_case_ : dict ) -> int:
"""simple docstring"""
_lowerCAmelCase = {key: len(_lowercase ) for key, value in gen_kwargs.items() if isinstance(_lowercase , _lowercase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
+ """\n""".join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
_lowerCAmelCase = max(lists_lengths.values() , default=0 )
return max(1 , _lowercase )
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> List[range]:
"""simple docstring"""
_lowerCAmelCase = []
for group_idx in range(_lowercase ):
_lowerCAmelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCAmelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCAmelCase = range(_lowercase , start + num_shards_to_add )
shards_indices_per_group.append(_lowercase )
return shards_indices_per_group
def __UpperCAmelCase ( snake_case_ : dict , snake_case_ : int ) -> List[dict]:
"""simple docstring"""
_lowerCAmelCase = _number_of_shards_in_gen_kwargs(_lowercase )
if num_shards == 1:
return [dict(_lowercase )]
else:
_lowerCAmelCase = _distribute_shards(num_shards=_lowercase , max_num_jobs=_lowercase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(_lowercase , _lowercase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(_lowercase ) )
]
def __UpperCAmelCase ( snake_case_ : List[dict] ) -> dict:
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _lowercase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __UpperCAmelCase ( snake_case_ : np.random.Generator , snake_case_ : dict ) -> dict:
"""simple docstring"""
_lowerCAmelCase = {len(_lowercase ) for value in gen_kwargs.values() if isinstance(_lowercase , _lowercase )}
_lowerCAmelCase = {}
for size in list_sizes:
_lowerCAmelCase = list(range(_lowercase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCAmelCase = dict(_lowercase )
for key, value in shuffled_kwargs.items():
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = [value[i] for i in indices_per_size[len(_lowercase )]]
return shuffled_kwargs
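# `_distribute_shards` above assigns `num_shards` shard indices to at most
# `max_num_jobs` contiguous groups, giving one extra shard to each of the
# first `num_shards % max_num_jobs` groups. A self-contained sketch of the
# same distribution rule:
def distribute(num_shards: int, max_num_jobs: int) -> list:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        size = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        groups.append(range(start, start + size))
        start += size
    return groups

assert distribute(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]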
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : int = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : int ) -> List[Any]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : int ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
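# The tool above first resolves plain-English language names through the
# LANGUAGE_CODES table before building the translation inputs. A minimal
# sketch of that lookup step with a two-entry stand-in table (illustrative
# values only):
lang_to_code = {"French": "fra_Latn", "Romanian": "ron_Latn"}

def resolve_language(lang: str) -> str:
    if lang not in lang_to_code:
        raise ValueError(f"{lang} is not a supported language.")
    return lang_to_code[lang]

assert resolve_language("French") == "fra_Latn"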
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : Tuple , snake_case_ : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase = """"""
else:
_lowerCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase = in_proj_bias[: config.hidden_size]
_lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ) -> str:
"""simple docstring"""
_lowerCAmelCase = dct.pop(snake_case__ )
_lowerCAmelCase = val
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : List[Any] , snake_case_ : str , snake_case_ : List[str]=False ) -> Any:
"""simple docstring"""
_lowerCAmelCase = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=snake_case__ , )
_lowerCAmelCase = ViTHybridConfig(backbone_config=snake_case__ , image_size=384 , num_labels=1000 )
_lowerCAmelCase = False
# load original model from timm
_lowerCAmelCase = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case__ )
_lowerCAmelCase = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase = """huggingface/label-files"""
_lowerCAmelCase = """imagenet-1k-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCAmelCase = ViTHybridModel(snake_case__ ).eval()
else:
_lowerCAmelCase = ViTHybridForImageClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# create image processor
_lowerCAmelCase = create_transform(**resolve_data_config({} , model=snake_case__ ) )
_lowerCAmelCase = transform.transforms
_lowerCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_lowerCAmelCase = ViTHybridImageProcessor(
do_resize=snake_case__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=snake_case__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = transform(snake_case__ ).unsqueeze(0 )
_lowerCAmelCase = processor(snake_case__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case__ , snake_case__ )
# verify logits
with torch.no_grad():
_lowerCAmelCase = model(snake_case__ )
_lowerCAmelCase = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
_lowerCAmelCase = timm_model.forward_features(snake_case__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case__ , outputs.pooler_output , atol=1e-3 )
else:
_lowerCAmelCase = timm_model(snake_case__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case__ , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
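# `read_in_q_k_v` above splits timm's fused qkv projection, of shape
# (3 * hidden_size, hidden_size), into separate query/key/value weights by
# slicing thirds along the first axis. A numpy-only sketch with a toy hidden
# size of 4:
import numpy as np

hidden = 4
in_proj_weight = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
query = in_proj_weight[:hidden, :]
key = in_proj_weight[hidden : 2 * hidden, :]
value = in_proj_weight[-hidden:, :]
assert query.shape == key.shape == value.shape == (hidden, hidden)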
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
_lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case_ , snake_case_ ):
_lowerCAmelCase = False
return [i for i in range(2 , snake_case_ ) if is_prime[i]]
def __UpperCAmelCase ( snake_case_ : int = 10**8 ) -> int:
"""simple docstring"""
_lowerCAmelCase = calculate_prime_numbers(max_number // 2 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = len(snake_case_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
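# The two-pointer loop above counts ordered pairs (p_i, p_j) with i <= j and
# p_i * p_j < max_number. A brute-force cross-check on a tiny input:
primes = [2, 3, 5, 7]
limit = 30
brute_force_count = sum(
    1
    for i in range(len(primes))
    for j in range(i, len(primes))
    if primes[i] * primes[j] < limit
)
assert brute_force_count == 8  # 4, 6, 10, 14, 9, 15, 21, 25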
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar('''T''')
SCREAMING_SNAKE_CASE : List[str] = TypeVar('''U''')
class __lowerCamelCase ( Generic[T, U] ):
def __init__(self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = key
_lowerCAmelCase = val
_lowerCAmelCase = None
_lowerCAmelCase = None
def __repr__(self ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class __lowerCamelCase ( Generic[T, U] ):
def __init__(self ):
'''simple docstring'''
_lowerCAmelCase = DoubleLinkedListNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = DoubleLinkedListNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        _lowerCAmelCase , _lowerCAmelCase = self.rear, self.head  # wire head.next and rear.prev together
def __repr__(self ):
'''simple docstring'''
_lowerCAmelCase = ["""DoubleLinkedList"""]
_lowerCAmelCase = self.head
while node.next is not None:
rep.append(str(SCREAMING_SNAKE_CASE_ ) )
_lowerCAmelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(SCREAMING_SNAKE_CASE_ )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowerCAmelCase = node
_lowerCAmelCase = previous
_lowerCAmelCase = node
_lowerCAmelCase = self.rear
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
_lowerCAmelCase = node.next
_lowerCAmelCase = node.prev
_lowerCAmelCase = None
_lowerCAmelCase = None
return node
class __lowerCamelCase ( Generic[T, U] ):
__UpperCamelCase = {}
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = DoubleLinkedList()
_lowerCAmelCase = capacity
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = {}
def __repr__(self ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__(self , lowerCamelCase ):
'''simple docstring'''
return key in self.cache
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
_lowerCAmelCase = self.cache[key]
_lowerCAmelCase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(SCREAMING_SNAKE_CASE_ )
return node.val
self.miss += 1
return None
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_lowerCAmelCase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(SCREAMING_SNAKE_CASE_ ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_lowerCAmelCase = DoubleLinkedListNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_lowerCAmelCase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_lowerCAmelCase = value
self.list.add(SCREAMING_SNAKE_CASE_ )
@classmethod
def A__ (cls , lowerCamelCase = 128 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
_lowerCAmelCase = LRUCache(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_lowerCAmelCase = func(*SCREAMING_SNAKE_CASE_ )
cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(SCREAMING_SNAKE_CASE_ , """cache_info""" , SCREAMING_SNAKE_CASE_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
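# The class above mirrors the behaviour of the standard library's
# functools.lru_cache: least recently used entries are evicted first, and
# every read re-marks the entry as most recent. The same access pattern with
# the built-in, for comparison:
from functools import lru_cache

@lru_cache(maxsize=2)
def square(x: int) -> int:
    return x * x

square(1), square(2), square(1), square(3)  # second square(1) is a hit; square(3) evicts 2
info = square.cache_info()
assert (info.hits, info.misses, info.currsize) == (1, 3, 2)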
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
__UpperCamelCase = 'CIDAS/clipseg-rd64-refined'
__UpperCamelCase = 'image_segmenter'
__UpperCamelCase = CLIPSegForImageSegmentation
__UpperCamelCase = ['image', 'text']
__UpperCamelCase = ['image']
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase , return_tensors="""pt""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase = self.model(**lowerCamelCase ).logits
return logits
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = outputs.cpu().detach().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
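
# Usage sketch (illustrative): `PipelineTool.__call__` chains encode/forward/
# decode, so the tool can be called directly. The image path is hypothetical.
if __name__ == "__main__":
    tool = ImageSegmentationTool()
    street = Image.open("street.jpg")  # hypothetical input image
    mask = tool(image=street, label="a car")
    mask.save("car_mask.png")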
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
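
# Usage sketch (illustrative): 16 kHz mono audio becomes log-mel `input_features`
# padded/truncated to 30 s, i.e. shape (batch, feature_size, 3000). The random
# waveform below is a stand-in for real audio.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    waveform = np.random.randn(16_000 * 5).astype(np.float32)  # 5 s of noise
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # expected: (1, 80, 3000)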
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    """simple docstring"""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
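
# Programmatic sketch (illustrative): the traversals also work on a tree built
# directly, without the interactive `build_tree` prompts used below.
def demo_small_tree() -> None:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    pre_order(root)  # prints: 1,2,3,
    print()
    in_order(root)  # prints: 2,1,3,
    print()
    post_order(root)  # prints: 2,3,1,
    print()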
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultinodeTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        '''simple docstring'''
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        '''simple docstring'''
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        '''simple docstring'''
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text, pattern):
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        '''simple docstring'''
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        '''simple docstring'''
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        '''simple docstring'''
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
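
# Cross-check sketch (illustrative): the heuristic's positions should agree
# with a brute-force scan, e.g. [0, 3] for pattern "AB" in text "ABAABA".
def brute_force_positions(text: str, pattern: str) -> list[int]:
    return [
        i
        for i in range(len(text) - len(pattern) + 1)
        if text[i : i + len(pattern)] == pattern
    ]


assert positions == brute_force_positions(text, pattern)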
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        '''simple docstring'''
        self.elements = []
        self.set = set()

    def minkey(self):
        '''simple docstring'''
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        '''simple docstring'''
        return len(self.elements) == 0

    def put(self, item, priority):
        '''simple docstring'''
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        '''simple docstring'''
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        '''simple docstring'''
        return self.elements[0][1]

    def get(self):
        '''simple docstring'''
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    """simple docstring"""
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    """simple docstring"""
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    """simple docstring"""
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    """simple docstring"""
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    """simple docstring"""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    """simple docstring"""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    """simple docstring"""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    """simple docstring"""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """simple docstring"""
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
def check_bipartite_dfs(graph):
    """simple docstring"""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
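
# Verification sketch (illustrative): the sample graph above is bipartite
# (color classes {0, 2, 4} and {1, 3}), while an odd cycle is not.
assert check_bipartite_dfs(graph) is True
assert check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False  # triangle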
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        '''simple docstring'''
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        '''simple docstring'''
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        '''simple docstring'''
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
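
# Usage sketch (illustrative): `fire` exposes the function as a CLI, e.g.
#   python save_randomly_initialized_model.py t5-small ./tiny-t5 --d_model=64
# (script name assumed). The Python equivalent would be:
#   save_randomly_initialized_version("t5-small", "./tiny-t5", d_model=64)
# where `d_model=64` is an example config override forwarded via **config_kwargs.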
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """simple docstring"""
    return getitem, k


def _set(k, v):
    """simple docstring"""
    return setitem, k, v


def _del(k):
    """simple docstring"""
    return delitem, k


def _run_operation(obj, fun, *args):
    """simple docstring"""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    """simple docstring"""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    """simple docstring"""

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
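
# Usage sketch (illustrative): all anagrams share one sorted-letter key, so
# lookups are O(1) once the map is built. Results depend on words.txt, e.g.
#   signature("listen") -> "eilnst"
#   anagram("listen")   -> ["enlist", "listen", "silent"] (if those words are present)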
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """simple docstring"""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """simple docstring"""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
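    # Consistency sketch (illustrative): all three implementations should agree
    # (for n=3, array=[1, 2, 5], target=5 the answer is 9).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
    )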
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        '''simple docstring'''
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        '''simple docstring'''
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        '''simple docstring'''
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        '''simple docstring'''
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
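
# Round-trip sketch (illustrative): XOR with the same key restores the
# plaintext, which is why `try_key` can screen candidate keys directly.
_plain = "the quick brown fox"
_key = (ord("a"), ord("b"), ord("c"))
_cipher = [ord(char) ^ keychar for char, keychar in zip(_plain, cycle(_key))]
assert try_key(_cipher, _key) == _plain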
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
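
# Usage sketch (illustrative): scores range from 0 to 12; English-like text
# matches more of ETAOIN's six most and six least frequent letters.
def demo_match_score() -> int:
    sample = "Alan Mathison Turing was an English mathematician and computer scientist"
    return english_freq_match_score(sample)  # tends to be high for English-like text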
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
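
# Note (illustrative): with `_LazyModule`, importing this package stays cheap;
# heavy symbols are resolved only on first attribute access, e.g.
#   from transformers.models.deprecated import trajectory_transformer  # hypothetical import path
#   trajectory_transformer.TrajectoryTransformerConfig  # triggers the real import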
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
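
# Worked example (illustrative): 13195 = 5 * 7 * 13 * 29, so its largest prime
# factor is 29; the loop divides out each prime factor in increasing order.
assert solution(13195) == 29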
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class __lowerCamelCase ( UpperCamelCase__ ):
def __init__(self ):
'''simple docstring'''
self.test()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = 0
_lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
_lowerCAmelCase = self.advance()
if not self.does_advance(UpperCamelCase_ ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.update(UpperCamelCase_ )
counter += 1
if counter > 10_000:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def A__ (self ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def A__ (self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def A__ (self ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def A__ (self ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def A__ (self , lowerCamelCase=False ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint ( Constraint ):
def __init__(self , lowerCamelCase ):
'''simple docstring'''
        super(PhrasalConstraint , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
_lowerCAmelCase = token_ids
_lowerCAmelCase = len(self.token_ids )
_lowerCAmelCase = -1 # the index of the currently fulfilled step
_lowerCAmelCase = False
def A__ (self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
if self.does_advance(UpperCamelCase_ ):
self.fulfilled_idx += 1
_lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
_lowerCAmelCase = True
_lowerCAmelCase = completed
else:
# failed to make progress.
_lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = False
_lowerCAmelCase = 0
def A__ (self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def A__ (self , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
_lowerCAmelCase = self.seqlen
_lowerCAmelCase = self.fulfilled_idx
_lowerCAmelCase = self.completed
return new_constraint
class DisjunctiveTrie :
def __init__(self , lowerCamelCase , lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase = max([len(UpperCamelCase_ ) for one in nested_token_ids] )
_lowerCAmelCase = {}
for token_ids in nested_token_ids:
_lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCamelCase_ ):
if token_id not in level:
_lowerCAmelCase = {}
_lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
"""Each list in `nested_token_ids` can\'t be a complete subset of another list, but is"""
f""" {nested_token_ids}.""" )
_lowerCAmelCase = root
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.trie
for current_token in current_seq:
_lowerCAmelCase = start[current_token]
_lowerCAmelCase = list(start.keys() )
return next_tokens
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.next_tokens(UpperCamelCase_ )
return len(UpperCamelCase_ ) == 0
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = list(root.values() )
if len(UpperCamelCase_ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase_ ) for nn in next_nodes] )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.count_leaves(UpperCamelCase_ )
return len(UpperCamelCase_ ) != leaf_count
class DisjunctiveConstraint ( Constraint ):
def __init__(self , lowerCamelCase ):
'''simple docstring'''
        super(DisjunctiveConstraint , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(UpperCamelCase_ , UpperCamelCase_ ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
_lowerCAmelCase = DisjunctiveTrie(UpperCamelCase_ )
_lowerCAmelCase = nested_token_ids
_lowerCAmelCase = self.trie.max_height
_lowerCAmelCase = []
_lowerCAmelCase = False
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
_lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
if self.does_advance(UpperCamelCase_ ):
self.current_seq.append(UpperCamelCase_ )
_lowerCAmelCase = True
else:
_lowerCAmelCase = True
self.reset()
_lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
_lowerCAmelCase = completed
return stepped, completed, reset
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = False
_lowerCAmelCase = []
def A__ (self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def A__ (self , lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
_lowerCAmelCase = self.seqlen
_lowerCAmelCase = self.current_seq
_lowerCAmelCase = self.completed
return new_constraint
class ConstraintListState :
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
_lowerCAmelCase = max([c.seqlen for c in constraints] )
_lowerCAmelCase = len(UpperCamelCase_ )
_lowerCAmelCase = False
self.init_state()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = None
_lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.constraints]
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_lowerCAmelCase = constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
else:
_lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def A__ (self , lowerCamelCase ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_lowerCAmelCase , _lowerCAmelCase = self.add(UpperCamelCase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
_lowerCAmelCase , _lowerCAmelCase = False, False
if self.completed:
_lowerCAmelCase = True
_lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
            # job, simply update the state
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) )
_lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
_lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase_ ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = pending_constraint.update(UpperCamelCase_ )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(UpperCamelCase_ )
_lowerCAmelCase = None
if not complete and stepped:
_lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def A__ (self , lowerCamelCase=True ):
'''simple docstring'''
        _lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects
        # throughout this process. So they're at initialization state.
if stateful:
_lowerCAmelCase = [
constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase_ )
_lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
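# A minimal usage sketch of the phrasal constraint above (token ids are hypothetical):
#
#   constraint = PhrasalConstraint([5, 9, 3])
#   constraint.reset()                                 # clear the self-test state
#   stepped, completed, reset = constraint.update(5)   # fulfilled_idx -> 0
#   stepped, completed, reset = constraint.update(9)   # fulfilled_idx -> 1
#   stepped, completed, reset = constraint.update(3)   # completed -> True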
| 355
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder : bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds : bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments :
    data_dir : str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task : Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length : Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length : Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang : Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang : Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams : Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss : bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , F"""{split}_results.json""" ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
_lowerCAmelCase = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def __UpperCAmelCase ( snake_case_ : Any ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
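# A typical invocation sketch (paths and model name are illustrative; the flags map
# onto the dataclass fields defined above plus the standard seq2seq training arguments):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small \
#       --data_dir ./wmt_en_ro \
#       --output_dir ./outputs \
#       --do_train --do_eval --predict_with_generate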
| 317
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : str = "The quick brown fox jumps over the lazy dog" , ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = set()
# Replace all the whitespace in our sentence
_lowerCAmelCase = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCamelCase_ ) == 26
def __UpperCAmelCase ( snake_case_ : str = "The quick brown fox jumps over the lazy dog" , ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = [False] * 26
for char in input_str:
if char.islower():
_lowerCAmelCase = True
elif char.isupper():
_lowerCAmelCase = True
return all(lowerCamelCase_ )
def __UpperCAmelCase ( snake_case_ : str = "The quick brown fox jumps over the lazy dog" , ) -> int:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
from timeit import timeit
_lowerCAmelCase = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit("""is_pangram()""" , setup=lowerCamelCase_ ) )
print(timeit("""is_pangram_faster()""" , setup=lowerCamelCase_ ) )
print(timeit("""is_pangram_fastest()""" , setup=lowerCamelCase_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
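# Example behaviour (a sketch):
#
#   is_pangram()                  # True  -- the default sentence uses all 26 letters
#   is_pangram("hello world")     # False -- most letters are missing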
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
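# With the lazy module installed above, importing from this package only materializes
# the heavy modeling code on first attribute access, e.g. (a sketch):
#
#   from transformers import FocalNetConfig   # resolved lazily through _LazyModule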
| 317
| 0
|
def gcd ( a : int , b : int ) -> int:
    """simple docstring"""
    while a != 0:
        a , b = b % a , a
    return b
def mod_inverse ( a : int , m : int ) -> int:
    """simple docstring"""
    if gcd(a , m ) != 1:
        msg = F"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
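# A minimal usage sketch of the extended-Euclid inverse above:
#
#   mod_inverse(3, 7)   # 5, since (3 * 5) % 7 == 1
#   mod_inverse(4, 8)   # raises ValueError: gcd(4, 8) != 1, so no inverse exists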
| 357
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317
| 0
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig ( PretrainedConfig ):
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
def __init__(self , lowerCamelCase = 0.1 , lowerCamelCase = "gelu" , lowerCamelCase = 30_522 , lowerCamelCase = 1_024 , lowerCamelCase = 4_096 , lowerCamelCase = 12 , lowerCamelCase = 16 , lowerCamelCase = 4_096 , lowerCamelCase = 12 , lowerCamelCase = 16 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 512 , lowerCamelCase = 0.02 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 0 , lowerCamelCase = 2 , lowerCamelCase = 32 , lowerCamelCase = 128 , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = True , lowerCamelCase = 0 , lowerCamelCase = 1 , lowerCamelCase = 2 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = num_encoder_layers
_lowerCAmelCase = num_encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = num_decoder_layers
_lowerCAmelCase = num_decoder_attention_heads
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = init_std # Normal(0, this parameter)
_lowerCAmelCase = activation_function
# parameters for xlmprophetnet
_lowerCAmelCase = ngram
_lowerCAmelCase = num_buckets
_lowerCAmelCase = relative_max_distance
_lowerCAmelCase = disable_ngram_loss
_lowerCAmelCase = eps
# 3 Types of Dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = dropout
_lowerCAmelCase = use_cache
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , add_cross_attention=__snake_case , decoder_start_token_id=__snake_case , **__snake_case , )
@property
    def num_hidden_layers (self ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers (self , value ):
'''simple docstring'''
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 358
|
"""simple docstring"""
def cocktail_shaker_sort ( unsorted : list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j] , unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }')
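# Example behaviour (a sketch):
#
#   cocktail_shaker_sort([4, 5, 2, 1, 2])    # [1, 2, 2, 4, 5]
#   cocktail_shaker_sort([-4, 5, 0, 1, 2])   # [-4, 0, 1, 2, 5]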
| 317
| 0
|
"""simple docstring"""
def molarity_to_normality ( nfactor : int , moles : float , volume : float ) -> float:
    """simple docstring"""
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure ( volume : float , moles : float , temperature : float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume ( pressure : float , moles : float , temperature : float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature ( pressure : float , moles : float , volume : float ) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
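# A worked example for the ideal-gas helpers above (R = 0.0821 L*atm/(mol*K)):
#
#   moles_to_pressure(0.82, 3, 300)   # (3 * 0.0821 * 300) / 0.82 ~= 90.11 -> 90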
| 359
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class TensorFlowBenchmark ( Benchmark ):
    args : TensorFlowBenchmarkArguments
    configs : PretrainedConfig
    framework : str = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 317
| 0
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial ( num : int ) -> int:
    """simple docstring"""
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
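# Example behaviour (a sketch; repeated calls hit the lru_cache):
#
#   factorial(5)    # 120
#   factorial(0)    # 1
#   factorial(-1)   # raises ValueError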
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig ( PretrainedConfig ):
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
def __init__(self , lowerCamelCase=267_735 , lowerCamelCase=[20_000, 40_000, 200_000] , lowerCamelCase=1_024 , lowerCamelCase=1_024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4_096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1_600 , lowerCamelCase=1_000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
    def max_position_embeddings (self ):
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings (self , value ):
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 317
| 0
|
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask :
    def __init__(self , task_performed , total ):
        '''simple docstring'''
        self.total_tasks = total # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list ) # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until (self , mask , task_no ):
        '''simple docstring'''
        # if all persons are already assigned, we found one valid arrangement
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways (self , task_performed ):
        '''simple docstring'''
        # store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5 # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
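# A quick sketch of the bitmask bookkeeping used above (values are illustrative):
#
#   mask = 0b011          # persons 0 and 1 already hold tasks
#   mask & (1 << 2)       # 0      -> person 2 is still free
#   mask | (1 << 2)       # 0b111  -> equals final_mask when there are 3 persons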
| 361
|
"""simple docstring"""
import math
def sieve ( n : int ) -> list[int]:
    """simple docstring"""
    prime = []
    start = 2
    end = int(math.sqrt(n ) ) # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(1_0**6))
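# Example behaviour (a sketch):
#
#   sieve(30)   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]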
| 317
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig ( PretrainedConfig ):
    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
def __init__(self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "student_t" , lowerCamelCase = "nll" , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = "mean" , lowerCamelCase = 0 , lowerCamelCase = 0 , lowerCamelCase = 0 , lowerCamelCase = 0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 64 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = True , lowerCamelCase = "gelu" , lowerCamelCase = 0.05 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 100 , lowerCamelCase = 0.02 , lowerCamelCase=True , lowerCamelCase = "prob" , lowerCamelCase = 5 , lowerCamelCase = True , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = prediction_length
_lowerCAmelCase = context_length or prediction_length
_lowerCAmelCase = distribution_output
_lowerCAmelCase = loss
_lowerCAmelCase = input_size
_lowerCAmelCase = num_time_features
_lowerCAmelCase = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_lowerCAmelCase = scaling
_lowerCAmelCase = num_dynamic_real_features
_lowerCAmelCase = num_static_real_features
_lowerCAmelCase = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_lowerCAmelCase = cardinality
else:
_lowerCAmelCase = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_lowerCAmelCase = embedding_dimension
else:
_lowerCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_lowerCAmelCase = num_parallel_samples
# Transformer architecture configuration
_lowerCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = use_cache
# Informer
_lowerCAmelCase = attention_type
_lowerCAmelCase = sampling_factor
_lowerCAmelCase = distil
super().__init__(is_encoder_decoder=__a , **__a )
@property
def A__ (self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
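# A minimal instantiation sketch (values are illustrative; keyword names follow the
# attributes assigned in __init__ above):
#
#   config = InformerConfig(prediction_length=24, context_length=48, input_size=1)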
| 362
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0) # Height, Width
SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 2_5_0
def main() -> None:
    """Build NUMBER_IMAGES mosaic images and matching YOLO label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read every YOLO label file in label_dir and pair it with its image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # convert YOLO center/size format to corner coordinates
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Combine 4 images into one mosaic canvas and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random lowercase alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
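# Usage sketch (illustrative; the paths below are placeholders, not from the original file):
#
#     LABEL_DIR = "data/labels"    # <name>.txt files with YOLO rows "cls x_c y_c w h" in [0, 1]
#     IMG_DIR = "data/images"      # matching <name>.jpg files
#     OUTPUT_DIR = "data/mosaics"  # receives <name>_MOSAIC_<code>.jpg and .txt pairs
#     main()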
| 317
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
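# Lazy-import sketch (illustrative): the module behaves as usual to callers, but each
# submodule listed in _import_structure is only imported on first attribute access:
#
#     from transformers.models.bloom import BloomConfig  # triggers configuration_bloom only
#     config = BloomConfig()                             # torch is not imported until needed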
| 363
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
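# Usage sketch (illustrative): the hook above only writes the extra report files when
# the shared --make-reports option is passed on the command line, e.g.
#
#     python -m pytest tests -s --make-reports=my_test_run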
| 317
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
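# Usage sketch (illustrative; assumes the standard agent tool call API):
#
#     tool = TranslationTool()
#     english = tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#
# Language names are the plain-English keys of LANGUAGE_CODES and are mapped to NLLB
# codes ("fra_Latn", "eng_Latn") in encode() before tokenization.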
| 317
| 0
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest element to the end of the prefix,
    then recurse on a shorter prefix until a full pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
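# Example (illustrative, not part of the original file):
#
#     >>> bubble_sort([0, 5, 2, 3, 2])
#     [0, 2, 2, 3, 5]
#
# Each recursive call needs one fewer comparison, so the worst case stays O(n^2),
# while an already-sorted list returns after a single O(n) pass.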
| 365
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes p*q < max_number with a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
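# Worked example (illustrative, not part of the original solution):
# solution(30) counts products p * q < 30 with primes p <= q drawn from
# calculate_prime_numbers(15) == [2, 3, 5, 7, 11, 13]:
#   p=2 -> (2,2)...(2,13) gives 6 products (4, 6, 10, 14, 22, 26)
#   p=3 -> (3,3),(3,5),(3,7) gives 3 products (9, 15, 21)
#   p=5 -> (5,5)            gives 1 product  (25)
# so solution(30) == 10.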
| 317
| 0
|
"""simple docstring"""
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 366
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: positive values become the (white) mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
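# Usage sketch (illustrative; assumes a PIL image and the agent tool call API):
#
#     from PIL import Image
#
#     tool = ImageSegmentationTool()
#     mask = tool(Image.open("street.jpg"), "a car")  # white where CLIPSeg logits > 0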
| 317
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by the argument parser to build a ConvertCommand from CLI args."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
| 367
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node: TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
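# Worked example (illustrative): for the tree
#
#         1
#        / \
#       2   3
#      / \
#     4   5
#
# the traversals above print (ignoring the trailing comma):
#   pre_order:  1,2,4,5,3     in_order:    4,2,5,1,3
#   post_order: 4,5,2,3,1     level_order: 1,2,3,4,5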
| 317
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
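# Invocation sketch (illustrative; the script name and repo path are placeholders):
#
#     python change_naming_configs_and_weights.py --repo_path ./ddpm-cifar10-32 --dump_path ./out
#
# With the default flags above only the weight keys (time_steps -> time_proj, ...) are
# rewritten; flip do_only_config/do_only_renaming to update the config fields instead.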
| 368
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Index of the rightmost occurrence of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Index in the text of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the match positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
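# Worked example (illustrative): with text = "ABAABA" and pattern = "AB", full matches
# occur at shifts 0 and 3, so the script above prints:
#
#     Pattern found in following positions:
#     [0, 3]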
| 317
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
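# Running sketch (illustrative; the file path assumes the usual transformers layout):
# the fast tests above need only CPU, while the generation test is gated behind @slow
# and downloads EleutherAI/pythia-410m-deduped, e.g.
#
#     python -m pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -k "rope_scaling"
#     RUN_SLOW=1 python -m pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -k "lm_generate"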
| 369
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
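

# Added note (not part of the original file): the fixed `torch.manual_seed(0)` above is what
# makes the `expected_slice` comparison meaningful -- the same seed must yield the same
# latents. A device-pinned generator is the more explicit variant of the same pattern.
def _seeded_generator(seed: int = 0) -> torch.Generator:
    return torch.Generator(device="cpu").manual_seed(seed)  # deterministic on a given device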
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Tuple ) -> bool:
"""simple docstring"""
_lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_lowerCAmelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_lowerCAmelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCAmelCase = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCAmelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
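

# Added sketch (not part of the original file): the same subset-sum DP in
# O(required_sum) space. Iterating sums downward ensures each element is used
# at most once, exactly like the 2D table above.
def is_sum_subset_1d(arr: list, required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset always sums to zero
    for value in arr:
        for j in range(required_sum, value - 1, -1):
            if reachable[j - value]:
                reachable[j] = True
    return reachable[required_sum]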
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
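

# Added sketch (not part of the original file): test_dit_512 above demonstrates the general
# diffusers pattern of swapping a pipeline's scheduler for a compatible one built from the
# current scheduler's config; the same pattern works outside the test. Wrapped in a function
# so importing this module does not trigger the model download.
def _scheduler_swap_sketch():
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")  # downloads pretrained weights
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    return pipe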