| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from math import isqrt, log10


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log10(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log10(prime_numbers[left])
            + prime_numbers[left] * log10(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
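    # Added illustration (not part of the original solution): a quick sanity
    # check of the sieve helper with a small bound.
    assert calculate_prime_numbers(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]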
| 73 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ):
"""simple docstring"""
if radian_mode:
return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )]
return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )]
def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ):
"""simple docstring"""
_lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case )
_lowerCamelCase : float = sum(__snake_case )
return abs(__snake_case ) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 88 | 0 |
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
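# Example of the wildcard rules above (illustrative, added here): a key ending in
# ".*" ignores everything under that prefix, e.g.
# should_ignore("text_encoder_prenet.encoder_prenet.0.weight", IGNORE_KEYS_S2T) -> True,
# while a plain key like "encoder.proj" only matches names containing that substring.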
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
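# Example invocation (illustrative; the script filename and paths are placeholders,
# not part of this sample):
# python convert_speecht5_checkpoint.py \
#     --task t2s \
#     --checkpoint_path ./speecht5_tts.pt \
#     --vocab_path ./spm_char.model \
#     --pytorch_dump_folder_path ./speecht5_tts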
| 713 |
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least n for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
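# Known values from the Project Euler 115 statement (illustrative checks):
#   solution(3)  -> 30
#   solution(10) -> 57
#   solution(50) -> 168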
if __name__ == "__main__":
    print(f"{solution() = }")
| 313 | 0 |
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
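# Example run (illustrative; the script filename is an assumption):
# python inference_bf16.py --dpm --steps 20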
| 61 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143 | 0 |
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging from 0 to 1; outputs a bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1; outputs an image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
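# Round-trip intuition (illustrative, added here): encoding quantizes the input to
# 8-bit integers, so decoding recovers it up to 1/255:
# >>> x = torch.rand(1, 3, 4, 4)
# >>> ((x - bits_to_decimal(decimal_to_bits(x))).abs() <= 1 / 255).all()
# tensor(True)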
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the bit-space step functions above to the scheduler instance so that
        # `self.scheduler.step(...)` receives the scheduler as `self`.
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
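# Usage sketch (illustrative; assumes a `unet` trained on bit-space latents, which
# this file does not provide):
# pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler())
# image = pipe(height=64, width=64, num_inference_steps=50).images[0]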
| 700 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
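# Illustrative usage (BertConfig is an assumption, not part of this module):
# >>> from transformers import BertConfig
# >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# >>> config.is_encoder_decoder
# True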
| 252 | 0 |
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers with LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
    import doctest

    doctest.testmod()
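    # Added illustration: sort a classic radix-sort example list.
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]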
| 60 |
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 60 | 1 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake construction step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each edge by the four edges forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
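# Quick check of the rotation helper (illustrative, added here):
# >>> rotate(numpy.array([1, 0]), 90).round(6).tolist()
# [0.0, 1.0]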
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the vector list as a closed curve."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 287 |
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 287 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCamelCase = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
_UpperCamelCase = {
"""google/rembert""": 256,
}
_UpperCamelCase = """▁"""
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
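# Behaviour sketch of the helpers above (illustrative; ids 5 and 6 are hypothetical):
# build_inputs_with_special_tokens([5, 6])        -> [cls_id, 5, 6, sep_id]
# create_token_type_ids_from_sequences([5], [6])  -> [0, 0, 0, 1, 1]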
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
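# Note on the pattern above (illustrative): at runtime the module object is replaced
# by a _LazyModule, so e.g. `from transformers.models.reformer import ReformerModel`
# only imports the heavy torch-backed submodule on first attribute access.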
| 129 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
import torch

from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt, txt_tok):
        embs = self.transformer(input_ids=txt, attention_mask=txt_tok)[0]
        embs2 = (embs * txt_tok.unsqueeze(2)).sum(dim=1) / txt_tok.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
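# Shape sketch (illustrative): for `txt` of shape (batch, seq_len) and a matching
# attention mask `txt_tok`, forward() mean-pools the token embeddings over the
# non-padded positions and projects them, returning a (batch, numDims) tensor
# alongside the raw (batch, seq_len, transformerDimensions) hidden states.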
| 574 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> List[Any]:
__snake_case = DeiTModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7)
def _a ( self) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def _a ( self) -> List[str]:
pass
def _a ( self) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_)
__snake_case = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def _a ( self) -> Optional[int]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> List[Any]:
__snake_case = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self) -> Any:
if not self.model_tester.is_training:
return
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.train()
__snake_case = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
__snake_case = model(**lowercase_).loss
loss.backward()
def _a ( self) -> Optional[int]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__snake_case = False
__snake_case = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__snake_case = model_class(lowercase_)
model.gradient_checkpointing_enable()
model.to(lowercase_)
model.train()
__snake_case = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
__snake_case = model(**lowercase_).loss
loss.backward()
def _a ( self) -> Any:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_),
*get_values(lowercase_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}"):
__snake_case = problem_type['title']
__snake_case = problem_type['num_labels']
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.train()
__snake_case = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if problem_type["num_labels"] > 1:
__snake_case = inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels'])
__snake_case = inputs['labels'].to(problem_type['dtype'])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_) as warning_list:
__snake_case = model(**lowercase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def _a ( self) -> Optional[int]:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DeiTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
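# Helper for the integration tests below: loads a sample image from the local test fixtures.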
def prepare_img( ):
'''simple docstring'''
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> str:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def _a ( self) -> Optional[int]:
model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , expected_shape)
expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def _a ( self) -> Dict:
model = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.float16 , device_map='auto')
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='pt')
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass to make sure inference works in fp16
with torch.no_grad():
outputs = model(pixel_values)
| 313 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __lowercase ( BertTokenizerFast ):
slow_tokenizer_class = CustomTokenizer
| 313 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
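# Fast tests for the AltDiffusion image-to-image pipeline, built from tiny dummy components.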
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = 1
snake_case__ : str = 3
snake_case__ : Any = (3_2, 3_2)
snake_case__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def __lowerCamelCase ( self :Dict ):
torch.manual_seed(0 )
snake_case__ : List[str] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=3_2 ,)
return model
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : str = RobertaSeriesConfig(
hidden_size=3_2 ,project_dim=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5_0_0_6 ,)
return RobertaSeriesModelWithTransformation(lowerCAmelCase_ )
@property
def __lowerCamelCase ( self :str ):
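# Stand-in for the safety checker's feature extractor: returns an object whose pixel_values is an empty tensor.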
def extract(*args ,**kwargs ):
class a :
def __init__( self :Tuple ):
snake_case__ : Union[str, Any] = torch.ones([0] )
def __lowerCamelCase ( self :int ,__lowercase :Optional[int] ):
self.pixel_values.to(lowerCAmelCase_ )
return self
return Out()
return extract
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[int] = self.dummy_cond_unet
snake_case__ : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
snake_case__ : str = self.dummy_vae
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case__ : Any = 7_7
snake_case__ : Union[str, Any] = self.dummy_image.to(lowerCAmelCase_ )
snake_case__ : Optional[int] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case__ : str = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ ,scheduler=lowerCAmelCase_ ,vae=lowerCAmelCase_ ,text_encoder=lowerCAmelCase_ ,tokenizer=lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,feature_extractor=self.dummy_extractor ,)
snake_case__ : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase_ )
snake_case__ : Union[str, Any] = alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
snake_case__ : Optional[Any] = '''A painting of a squirrel eating a burger'''
snake_case__ : Optional[Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
snake_case__ : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=lowerCAmelCase_ ,)
snake_case__ : Optional[int] = output.images
snake_case__ : str = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
snake_case__ : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=lowerCAmelCase_ ,return_dict=lowerCAmelCase_ ,)[0]
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Tuple = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' )
def __lowerCamelCase ( self :Any ):
snake_case__ : List[str] = self.dummy_cond_unet
snake_case__ : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
snake_case__ : str = self.dummy_vae
snake_case__ : List[str] = self.dummy_text_encoder
snake_case__ : List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
snake_case__ : List[str] = 7_7
snake_case__ : str = self.dummy_image.to(lowerCAmelCase_ )
# put models in fp16
snake_case__ : Optional[int] = unet.half()
snake_case__ : List[str] = vae.half()
snake_case__ : Any = bert.half()
# make sure here that pndm scheduler skips prk
snake_case__ : Any = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ ,scheduler=lowerCAmelCase_ ,vae=lowerCAmelCase_ ,text_encoder=lowerCAmelCase_ ,tokenizer=lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,feature_extractor=self.dummy_extractor ,)
snake_case__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase_ )
snake_case__ : Optional[int] = alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
snake_case__ : Tuple = '''A painting of a squirrel eating a burger'''
snake_case__ : List[Any] = torch.manual_seed(0 )
snake_case__ : Dict = alt_pipe(
[prompt] ,generator=lowerCAmelCase_ ,num_inference_steps=2 ,output_type='''np''' ,image=lowerCAmelCase_ ,).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Any = init_image.resize((7_6_0, 5_0_4) )
snake_case__ : List[Any] = '''BAAI/AltDiffusion'''
snake_case__ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,)
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
snake_case__ : Tuple = '''A fantasy landscape, trending on artstation'''
snake_case__ : List[str] = torch.manual_seed(0 )
snake_case__ : List[str] = pipe(
prompt=lowerCAmelCase_ ,image=lowerCAmelCase_ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase_ ,output_type='''np''' ,)
snake_case__ : Optional[Any] = output.images[0]
snake_case__ : Tuple = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
snake_case__ : int = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
snake_case__ : int = init_image.resize((7_6_8, 5_1_2) )
snake_case__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
snake_case__ : Tuple = '''BAAI/AltDiffusion'''
snake_case__ : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,)
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = '''A fantasy landscape, trending on artstation'''
snake_case__ : Union[str, Any] = torch.manual_seed(0 )
snake_case__ : Tuple = pipe(
prompt=lowerCAmelCase_ ,image=lowerCAmelCase_ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase_ ,output_type='''np''' ,)
snake_case__ : str = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 712 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
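# Tester that builds a tiny Llama config and random inputs shared by the model tests below.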
class LlamaModelTester :
def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None ,):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = LlamaModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids ,attention_mask=input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask ,):
config.add_cross_attention = True
model = LlamaModel(config )
model.to(torch_device )
model.eval()
result = model(
input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
result = model(
input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,)
result = model(input_ids ,attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask ,):
model = LlamaForCausalLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask ,):
config.is_decoder = True
config.add_cross_attention = True
model = LlamaForCausalLM(config=config )
model.to(torch_device )
model.eval()
# first forward pass
outputs = model(
input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,use_cache=True ,)
past_key_values = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
next_mask = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1 )
next_attention_mask = torch.cat([input_mask, next_mask] ,dim=-1 )
output_from_no_past = model(
next_input_ids ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,output_hidden_states=True ,)['''hidden_states'''][0]
output_from_past = model(
next_tokens ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,past_key_values=past_key_values ,output_hidden_states=True ,)['''hidden_states'''][0]
# select random slice
random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice ,output_from_no_past_slice ,atol=1e-3 ) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCAmelCase : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : List[str] = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : Any = False
def setUp( self ):
self.model_tester = LlamaModelTester(self )
self.config_tester = ConfigTester(self ,config_class=LlamaConfig ,hidden_size=3_7 )
def __lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :List[str] ):
snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = 3
snake_case__ : Union[str, Any] = input_dict['''input_ids''']
snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase )
snake_case__ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[str] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = 3
snake_case__ : List[Any] = '''single_label_classification'''
snake_case__ : Tuple = input_dict['''input_ids''']
snake_case__ : Optional[int] = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Dict = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Any = 3
snake_case__ : Optional[int] = '''multi_label_classification'''
snake_case__ : str = input_dict['''input_ids''']
snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase )
snake_case__ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ : Optional[int] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def __lowerCamelCase ( self :Dict ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :Tuple ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Any = ids_tensor([1, 1_0] ,config.vocab_size )
snake_case__ : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Any = LlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
snake_case__ : Any = original_model(__lowercase ).last_hidden_state
snake_case__ : Any = original_model(__lowercase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : List[str] = {'''type''': scaling_type, '''factor''': 10.0}
snake_case__ : str = LlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
snake_case__ : List[str] = scaled_model(__lowercase ).last_hidden_state
snake_case__ : Dict = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
@require_torch
class a ( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[int] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' )
snake_case__ : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ : Optional[Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :List[str] ):
snake_case__ : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' )
snake_case__ : str = model(torch.tensor(__lowercase ) )
# Expected mean on dim = -1
snake_case__ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' )
snake_case__ : str = model(torch.tensor(__lowercase ) )
# Expected mean on dim = -1
snake_case__ : Optional[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' )
snake_case__ : Any = model(torch.tensor(__lowercase ) )
snake_case__ : Optional[int] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
snake_case__ : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Model is currently gated''' )
@slow
def __lowerCamelCase ( self :Dict ):
snake_case__ : Tuple = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
snake_case__ : Optional[Any] = '''Simply put, the theory of relativity states that '''
snake_case__ : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
snake_case__ : List[Any] = tokenizer.encode(__lowercase ,return_tensors='''pt''' )
snake_case__ : List[str] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=__lowercase )
# greedy generation outputs
snake_case__ : int = model.generate(__lowercase ,max_new_tokens=6_4 ,top_p=__lowercase ,temperature=1 ,do_sample=__lowercase )
snake_case__ : Union[str, Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=__lowercase )
self.assertEqual(__lowercase ,__lowercase )
| 219 | 0 |
'''simple docstring'''
def heaps( arr ):
'''simple docstring'''
if len(arr ) <= 1:
return [tuple(arr )]
res = []
def generate(k , arr ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , arr )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
arr[i], arr[k - 1] = arr[k - 1], arr[i]
else: # k is odd
arr[0], arr[k - 1] = arr[k - 1], arr[0]
generate(k - 1 , arr )
generate(len(arr ) , arr )
return res
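# Example: heaps([1, 2, 3]) returns all 3! = 6 permutations of the input list (Heap's algorithm).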
if __name__ == "__main__":
a = input("Enter numbers separated by a comma:\n").strip()
a = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 109 |
'''simple docstring'''
def nand_gate( input_a : int , input_b : int ):
"""simple docstring"""
return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate( ):
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 211 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a : int = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Dict = ["""ViTFeatureExtractor"""]
__a : List[str] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[Any] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[Any] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[int] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 522 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_( state_dict ):
"""simple docstring"""
ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(k , None )
def make_linear_from_emb( emb ):
"""simple docstring"""
vocab_size , emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
lin_layer.weight.data = emb.weight.data
return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path ):
"""simple docstring"""
mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
state_dict = mam_aaa['''model''']
remove_ignore_keys_(state_dict )
vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
config = MaMaaaConfig(
vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
model = MaMaaaForConditionalGeneration(config )
model.model.load_state_dict(state_dict , strict=False )
model.lm_head = make_linear_from_emb(model.model.shared )
return model
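# Example invocation (hypothetical script name and paths, shown for illustration only):
#   python convert_fairseq_checkpoint.py /path/to/model.pt ./converted-model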
if __name__ == "__main__":
__a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
__a : Union[str, Any] = parser.parse_args()
model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 522 | 1 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A : str = logging.get_logger(__name__)
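# The helpers below translate original MaskFormer checkpoint keys (Swin backbone plus detectron2-style heads) into HF Transformers names.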
def get_maskformer_config( model_name : str ):
'''simple docstring'''
backbone_config = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
config = MaskFormerConfig(backbone_config=backbone_config )
repo_id = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
config.num_labels = 8_4_7
filename = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
config.num_labels = 1_5_0
filename = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
config.num_labels = 1_7_1
filename = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
config.num_labels = 1_3_3
filename = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
config.num_labels = 1_9
filename = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
config.num_labels = 6_5
filename = """mapillary-vistas-id2label.json"""
idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
return config
def create_rename_keys( config ):
'''simple docstring'''
rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
'''simple docstring'''
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
'''simple docstring'''
# fmt: off
hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size]
state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size]
state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img( ):
'''simple docstring'''
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name : str ,checkpoint_path : str ,pytorch_dump_folder_path : str ,push_to_hub : bool = False ):
'''simple docstring'''
config = get_maskformer_config(model_name )
# load original state_dict
with open(checkpoint_path ,"""rb""" ) as f:
data = pickle.load(f )
state_dict = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict ,src ,dest )
read_in_swin_q_k_v(state_dict ,config.backbone_config )
read_in_decoder_q_k_v(state_dict ,config )
# update to torch tensors
for key, value in state_dict.items():
state_dict[key] = torch.from_numpy(value )
# load 🤗 model
model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(name ,param.shape )
missing_keys , unexpected_keys = model.load_state_dict(state_dict ,strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, f'Unexpected keys: {unexpected_keys}'
# verify results
image = prepare_img()
if "vistas" in model_name:
ignore_index = 6_5
elif "cityscapes" in model_name:
ignore_index = 6_5_5_3_5
else:
ignore_index = 2_5_5
reduce_labels = True if """ade""" in model_name else False
image_processor = MaskFormerImageProcessor(ignore_index=ignore_index ,reduce_labels=reduce_labels )
inputs = image_processor(image ,return_tensors="""pt""" )
outputs = model(**inputs )
print("""Logits:""" ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
expected_logits = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,expected_logits ,atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f'nielsr/{model_name}' )
image_processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A : Optional[int] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 499 |
"""simple docstring"""
def solution( limit : int = 1_0_0_0_0_0_0 ):
'''simple docstring'''
phi = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
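# Example: solution(8) == 21, the number of reduced proper fractions a/d with d <= 8 (cf. Project Euler 72).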
if __name__ == "__main__":
print(solution())
| 499 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
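# Fast tests for the AudioLDM text-to-audio pipeline, built from tiny dummy components.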
class UpperCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[int] = AudioLDMPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_AUDIO_PARAMS
_SCREAMING_SNAKE_CASE : str = TEXT_TO_AUDIO_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : List[Any] = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def get_dummy_components( self ):
torch.manual_seed(0 )
a =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_lowerCAmelCase , )
a =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
a =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a =ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
a =ClapTextModelWithProjection(_lowerCAmelCase )
a =RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
a =SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_lowerCAmelCase , )
a =SpeechTaHifiGan(_lowerCAmelCase )
a ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def lowerCAmelCase__ ( self ):
a ="""cpu""" # ensure determinism for the device-dependent torch.Generator
a =self.get_dummy_components()
a =AudioLDMPipeline(**_lowerCAmelCase )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =self.get_dummy_inputs(_lowerCAmelCase )
a =audioldm_pipe(**_lowerCAmelCase )
a =output.audios[0]
assert audio.ndim == 1
assert len(_lowerCAmelCase ) == 256
a =audio[:10]
a =np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self ):
a =self.get_dummy_components()
a =AudioLDMPipeline(**_lowerCAmelCase )
        a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =self.get_dummy_inputs(_lowerCAmelCase )
a =3 * [inputs["""prompt"""]]
# forward
a =audioldm_pipe(**_lowerCAmelCase )
a =output.audios[0]
a =self.get_dummy_inputs(_lowerCAmelCase )
a =3 * [inputs.pop("""prompt""" )]
a =audioldm_pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
a =text_inputs["""input_ids"""].to(_lowerCAmelCase )
a =audioldm_pipe.text_encoder(
_lowerCAmelCase , )
a =prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
a =F.normalize(_lowerCAmelCase , dim=-1 )
a =prompt_embeds
# forward
a =audioldm_pipe(**_lowerCAmelCase )
a =output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCAmelCase__ ( self ):
a =self.get_dummy_components()
a =AudioLDMPipeline(**_lowerCAmelCase )
        a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =self.get_dummy_inputs(_lowerCAmelCase )
a =3 * ["""this is a negative prompt"""]
a =negative_prompt
a =3 * [inputs["""prompt"""]]
# forward
a =audioldm_pipe(**_lowerCAmelCase )
a =output.audios[0]
a =self.get_dummy_inputs(_lowerCAmelCase )
a =3 * [inputs.pop("""prompt""" )]
        embeds = []
for p in [prompt, negative_prompt]:
a =audioldm_pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
a =text_inputs["""input_ids"""].to(_lowerCAmelCase )
a =audioldm_pipe.text_encoder(
_lowerCAmelCase , )
a =text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
a =F.normalize(_lowerCAmelCase , dim=-1 )
embeds.append(_lowerCAmelCase )
        prompt_embeds , negative_prompt_embeds = embeds
# forward
a =audioldm_pipe(**_lowerCAmelCase )
a =output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCAmelCase__ ( self ):
a ="""cpu""" # ensure determinism for the device-dependent torch.Generator
a =self.get_dummy_components()
a =PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
a =AudioLDMPipeline(**_lowerCAmelCase )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =self.get_dummy_inputs(_lowerCAmelCase )
a ="""egg cracking"""
a =audioldm_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
a =output.audios[0]
assert audio.ndim == 1
assert len(_lowerCAmelCase ) == 256
a =audio[:10]
a =np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self ):
a ="""cpu""" # ensure determinism for the device-dependent torch.Generator
a =self.get_dummy_components()
a =PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
a =AudioLDMPipeline(**_lowerCAmelCase )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a ="""A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
a =audioldm_pipe(_lowerCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
a =2
a =audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
a =2
a =audioldm_pipe(_lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=_lowerCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
a =2
a =audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_lowerCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowerCAmelCase__ ( self ):
a ="""cpu""" # ensure determinism for the device-dependent torch.Generator
a =self.get_dummy_components()
a =AudioLDMPipeline(**_lowerCAmelCase )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =audioldm_pipe.vocoder.config.sampling_rate
a =self.get_dummy_inputs(_lowerCAmelCase )
a =audioldm_pipe(audio_length_in_s=0.0_16 , **_lowerCAmelCase )
a =output.audios[0]
assert audio.ndim == 1
assert len(_lowerCAmelCase ) / vocoder_sampling_rate == 0.0_16
a =audioldm_pipe(audio_length_in_s=0.0_32 , **_lowerCAmelCase )
a =output.audios[0]
assert audio.ndim == 1
assert len(_lowerCAmelCase ) / vocoder_sampling_rate == 0.0_32
def lowerCAmelCase__ ( self ):
a =self.get_dummy_components()
a =AudioLDMPipeline(**_lowerCAmelCase )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =["""hey"""]
a =audioldm_pipe(_lowerCAmelCase , num_inference_steps=1 )
a =output.audios.shape
assert audio_shape == (1, 256)
a =audioldm_pipe.vocoder.config
config.model_in_dim *= 2
a =SpeechTaHifiGan(_lowerCAmelCase ).to(_lowerCAmelCase )
a =audioldm_pipe(_lowerCAmelCase , num_inference_steps=1 )
a =output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCAmelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_lowerCAmelCase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowerCAmelCase )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase="cpu" , _lowerCAmelCase=torch.floataa , _lowerCAmelCase=0 ):
a =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
a =np.random.RandomState(_lowerCAmelCase ).standard_normal((1, 8, 128, 16) )
a =torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
a ={
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def lowerCAmelCase__ ( self ):
a =AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =self.get_inputs(_lowerCAmelCase )
a =25
a =audioldm_pipe(**_lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(_lowerCAmelCase ) == 81_920
a =audio[77_230:77_240]
a =np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
a =np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowerCAmelCase__ ( self ):
a =AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
a =LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
a =audioldm_pipe.to(_lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
a =self.get_inputs(_lowerCAmelCase )
a =audioldm_pipe(**_lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(_lowerCAmelCase ) == 81_920
a =audio[27_780:27_790]
a =np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
a =np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
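# Minimal usage sketch (added; assumes the public diffusers API exercised by the slow
# tests above -- the checkpoint name "cvssp/audioldm" is taken from those tests):
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]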
| 321 |
_lowerCamelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 321 | 1 |
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """For 0 <= n <= 1_000_000, look the primality up in the precomputed sieve."""
    return sieve[n]
def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
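# Illustrative check (added): there are exactly 13 circular primes below 100.
assert find_circular_primes(100) == [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]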
if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 16 |
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can close a polygon in the Euclidean plane,
    i.e. the longest side is strictly shorter than the sum of all the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
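# Illustrative checks (added): [6, 10, 5] closes a polygon (10 < 6 + 5),
# while [3, 7, 13, 2] does not (13 >= 3 + 7 + 2).
assert check_polygon([6, 10, 5]) is True
assert check_polygon([3, 7, 13, 2]) is False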
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"
    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"
    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):
        kwargs.pop("text_config_dict", None)
        kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs):
        """Instantiate a `BridgeTowerConfig` from a text and a vision configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
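# Usage sketch (added; follows the standard `transformers` config pattern):
#
#   config = BridgeTowerConfig()
#   config.text_config.vocab_size       # 50265 by default
#   config.to_dict()["model_type"]      # "bridgetower"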
| 491 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
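# Illustrative invocation (added; the test-file path is hypothetical):
#
#   pytest tests/models/chinese_clip/test_image_processing_chinese_clip.py -k "test_call_pil"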
| 491 | 1 |
snake_case = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 67 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A_ : List[str] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss
        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
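# Usage sketch (added; mirrors the FaissIndex behaviour exercised above):
#
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, indices = index.search(np.ones(5, dtype=np.float32))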
| 196 | 0 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase class that keeps a single temporary directory open for the duration of the class,
    wiping its contents (but not the directory itself) between tests."""
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state singletons between tests."""
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase class that registers mocks and stops them automatically after each test."""
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
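# Usage sketch (added; assumes the helper names restored above):
#
#   output = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#   output == "hello\n"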
| 712 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
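# Illustrative CLI call (added; the script name and paths are placeholders):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base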
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 126 | 0 |
def min_path_sum(grid: list) -> int:
    """Find the minimum top-left to bottom-right path sum of a grid, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
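# Worked example (added): moving only right/down through
#   1 3 1
#   1 5 1
#   4 2 1
# the cheapest path is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7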
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
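# Illustrative (added): the real root of x^3 - 2x - 5 is approximately 2.0945515,
# so the secant iteration started from 3 and 3.5 converges to that value.
assert abs(intersection(f, 3, 3.5) - 2.0945515) < 1e-4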
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 315 | 0 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
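# Illustrative (added): the sort is in place, and `right` is an exclusive upper bound.
_demo = [5, 2, 9, 1]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 2, 5, 9]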
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 552 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase__ = """."""
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 552 | 1 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
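# With X applied to both qubits, every shot collapses to state '11',
# so the printed counts should be {'11': 1000} on the simulator.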
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 669 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 669 | 1 |
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed: silences TensorFlow's C++ logging (os is otherwise unused here)
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 709 | """simple docstring"""
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
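# All three implementations return the digit sum of |n|,
# e.g. sum_of_digits(12345) == 15.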
def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 173 | 0 |
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
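# Example: binary_and(25, 32) == "0b000000" since 011001 AND 100000 has no
# overlapping set bits after zero-padding to equal length.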
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; remap that to success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DocTestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 698 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
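# Typical invocation (illustrative paths and model name):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed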
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 718 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """A variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
return self.config.num_train_timesteps
| 369 | 0 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1:   pooling size
        :param bp_num1:   units number of the flatten layer
        :param bp_num2:   units number of the hidden layer
        :param bp_num3:   units number of the output layer
        :param rate_w:    weight learning rate
        :param rate_t:    threshold learning rate
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # restore a model previously stored with save_model()
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_weight = model_dic.get("rate_weight")
        rate_thre = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_weight, rate_thre)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand a list of matrices into a one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a single matrix into a one-dimensional row
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the data slices of the pooling layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the image data after the convolution and pooling steps for inspection
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
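# Rough usage sketch (illustrative shapes only; real image data and
# consistent layer sizes are required):
# cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=6, bp_num3=2)
# cnn.train(patterns, train_images, train_labels, n_repeat=50,
#           error_accuracy=0.5, draw_e=False)
# predictions = cnn.predict(test_images)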
if __name__ == "__main__":
pass
| 163 | from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
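# Each Chudnovsky term contributes roughly 14 digits of precision, which is
# why the loop runs ceil(precision / 14) times; e.g. pi(10) == '3.14159265'.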
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 666 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir: str) -> dict:
    """Load the `all_results.json` file produced by a training run."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 145 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
        )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 145 | 1 |
def solution(limit: int = 1000000) -> int:
    """Project Euler problem 135: count the values of n below the limit that
    have exactly ten distinct solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 417 | import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as a string of '0'/'1' characters."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Add new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given data_bits using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file's length (unary-prefixed binary) to the stream."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given string of '0'/'1' characters to a file as bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, LZW-compress it, and write the result."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
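# Example (illustrative paths): compress("plain.txt", "plain.lz") reads the
# source bits, LZW-encodes them, prepends the length header, and writes bytes.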
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 417 | 1 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex for Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm on a connected, undirected, weighted graph;
    returns the MST edges as (vertex, parent) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
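# Example: for the triangle 1-2 (weight 1), 2-3 (weight 2), 1-3 (weight 3)
# built with connect() on g = [Vertex(i) for i in range(3)],
# prim(g, g[0]) yields the MST edges [(2, 1), (3, 2)].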
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-based variant of Prim's algorithm; yields MST edges lazily."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 159 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BertTokenizer
def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
super().__init__(
vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self ,save_directory ,filename_prefix = None ):
files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
return tuple(files )
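# Usage sketch for the fast tokenizer above (assumes network access to the
# Hugging Face Hub; 'bert-base-cased' is one public checkpoint). Sentence
# pairs are rendered as [CLS] A [SEP] B [SEP], and token_type_ids mark the two
# segments with 0 and 1, mirroring create_token_type_ids_from_sequences.
if __name__ == "__main__":
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
    encoded = tokenizer("How are you?", "I am fine.")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
    print(encoded["token_type_ids"])  # e.g. [0, 0, ..., 0, 1, ..., 1]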
| 16 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
# this class exists only for backwards compatibility; warn at import time
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , FutureWarning , )
| 430 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests( TestCasePlus ):
def _create_dummy_data( self , data_dir ):
os.makedirs(data_dir , exist_ok=True )
contents = {'source': 'What is love ?', 'target': 'life'}
n_lines = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
content = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(data_dir , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(content )
def _run_finetune( self , gpus : int , distributed_retriever : str = "pytorch" ):
tmp_dir = self.get_auto_remove_tmp_dir()
output_dir = os.path.join(tmp_dir , 'output' )
data_dir = os.path.join(tmp_dir , 'data' )
self._create_dummy_data(data_dir=data_dir )
testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(cmd , env=self.get_env() )
metrics_save_path = os.path.join(output_dir , 'metrics.json' )
with open(metrics_save_path ) as f:
result = json.load(f )
return result
@require_torch_gpu
def test_finetune_gpu( self ):
result = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def test_finetune_multigpu( self ):
result = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def test_finetune_gpu_ray_retrieval( self ):
result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def test_finetune_multigpu_ray_retrieval( self ):
result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 430 | 1 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 714 |
UpperCAmelCase_ : str = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCAmelCase ( _a : str ) -> int:
lowerCAmelCase_ : Any = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
lowerCAmelCase_ : Stack[int] = Stack()
lowerCAmelCase_ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_a ) )
elif i in operators:
# RULE 2
operator_stack.push(_a )
elif i == ")":
# RULE 4
lowerCAmelCase_ : Optional[int] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase_ : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase_ : List[str] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase_ : Dict = operators[opr](_a , _a )
operand_stack.push(_a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
UpperCAmelCase_ : Dict = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 440 | 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version():
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches( image_tensor , patch_height , patch_width ):
requires_backends(torch_extract_patches , ['torch'] )
_check_torch_version()
image_tensor = image_tensor.unsqueeze(0 )
patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def render_text( text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
requires_backends(render_text , 'vision' )
# Add new lines so that each line is no more than 80 characters.
wrapper = textwrap.TextWrapper(width=80 )
lines = wrapper.wrap(text=text )
wrapped_text = '\n'.join(lines )
if font_bytes is not None and font_path is None:
font = io.BytesIO(font_bytes )
elif font_path is not None:
font = font_path
else:
font = hf_hub_download(DEFAULT_FONT_PATH , 'Arial.TTF' )
font = ImageFont.truetype(font , encoding='UTF-8' , size=text_size )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
temp_draw = ImageDraw.Draw(Image.new('RGB' , (1, 1) , background_color ) )
_, _, text_width, text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
# Create the actual image with a bit of padding around the text.
image_width = text_width + left_padding + right_padding
image_height = text_height + top_padding + bottom_padding
image = Image.new('RGB' , (image_width, image_height) , background_color )
draw = ImageDraw.Draw(image )
draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
return image
def render_header( image , header , **kwargs ):
requires_backends(render_header , 'vision' )
# Convert to PIL image if necessary
image = to_pil_image(image )
header_image = render_text(header , **kwargs )
new_width = max(header_image.width , image.width )
new_height = int(image.height * (new_width / image.width) )
new_header_height = int(header_image.height * (new_width / header_image.width) )
new_image = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
new_image = to_numpy_array(new_image )
if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
return new_image
class Pix2StructImageProcessor( BaseImageProcessor ):
model_input_names = ["flattened_patches"]
def __init__( self , do_convert_rgb : bool = True , do_normalize : bool = True , patch_size : Dict[str, int] = None , max_patches : int = 2048 , is_vqa : bool = False , **kwargs , ):
super().__init__(**kwargs )
self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
self.do_normalize = do_normalize
self.do_convert_rgb = do_convert_rgb
self.max_patches = max_patches
self.is_vqa = is_vqa
def extract_flattened_patches( self , image : np.ndarray , max_patches : int , patch_size : dict , **kwargs ):
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
image = to_channel_dimension_format(image , ChannelDimension.FIRST )
image = torch.from_numpy(image )
patch_height, patch_width = patch_size['height'], patch_size['width']
image_height, image_width = get_image_size(image )
# maximize scale s.t. the number of patches fits in the budget
scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
resized_height = max(num_feasible_rows * patch_height , 1 )
resized_width = max(num_feasible_cols * patch_width , 1 )
image = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=False , antialias=True , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
patches = torch_extract_patches(image , patch_height , patch_width )
patches_shape = patches.shape
rows = patches_shape[1]
columns = patches_shape[2]
depth = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
patches = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
row_ids = row_ids.to(torch.float32 )
col_ids = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
result = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
result = to_numpy_array(result )
return result
def normalize( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
if image.dtype == np.uint8:
image = image.astype(np.float32 )
# take mean and std across the whole `image`
mean = np.mean(image )
std = np.std(image )
adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
def preprocess( self , images : ImageInput , header_text : Optional[str] = None , do_convert_rgb : bool = None , do_normalize : Optional[bool] = None , max_patches : Optional[int] = None , patch_size : Optional[Dict[str, int]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
patch_size = patch_size if patch_size is not None else self.patch_size
max_patches = max_patches if max_patches is not None else self.max_patches
is_vqa = self.is_vqa
if kwargs.get('data_format' , None ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
font_bytes = kwargs.pop('font_bytes' , None )
font_path = kwargs.pop('font_path' , None )
if isinstance(header_text , str ):
header_text = [header_text] * len(images )
images = [
render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
for i, image in enumerate(images )
]
if do_normalize:
images = [self.normalize(image=image ) for image in images]
# convert to torch tensor and permute
images = [
self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
for image in images
]
# create attention mask in numpy
attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
encoded_outputs = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=return_tensors )
return encoded_outputs
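# A standalone sketch of the patch-budget arithmetic used in
# extract_flattened_patches above (numbers are arbitrary examples): the image
# is rescaled so that rows * cols patches of patch_size never exceed
# max_patches, because floor() bounds each factor of the product.
if __name__ == "__main__":
    image_height, image_width = 480, 640
    patch_height, patch_width = 16, 16
    max_patches = 2048
    scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
    rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
    cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
    print(rows, cols, rows * cols)  # 39 52 2028 -> within the 2048 budget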
| 123 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline( Pipeline ):
def __init__( self , **kwargs ):
super().__init__(**kwargs )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , images: Union[str, List[str], "Image", List["Image"]] , **kwargs ):
return super().__call__(images , **kwargs )
def _sanitize_parameters( self , **kwargs ):
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params['candidate_labels'] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
image = load_image(image )
inputs = self.image_processor(images=[image] , return_tensors=self.framework )
inputs['candidate_labels'] = candidate_labels
sequences = [hypothesis_template.format(x ) for x in candidate_labels]
text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
inputs['text_inputs'] = [text_inputs]
return inputs
def _forward( self , model_inputs ):
candidate_labels = model_inputs.pop('candidate_labels' )
text_inputs = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UserDict ):
text_inputs = text_inputs[0]
else:
# Batching case.
text_inputs = text_inputs[0][0]
outputs = self.model(**text_inputs , **model_inputs )
model_outputs = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def postprocess( self , model_outputs ):
candidate_labels = model_outputs.pop('candidate_labels' )
logits = model_outputs['logits'][0]
if self.framework == "pt":
probs = logits.softmax(dim=-1 ).squeeze(-1 )
scores = probs.tolist()
if not isinstance(scores , list ):
scores = [scores]
elif self.framework == "tf":
probs = stable_softmax(logits , axis=-1 )
scores = probs.numpy().tolist()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
result = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
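# Usage sketch via the high-level pipeline factory (any CLIP-style checkpoint
# should work; 'openai/clip-vit-base-patch32' is one public example).
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(
        task="zero-shot-image-classification",
        model="openai/clip-vit-base-patch32",
    )
    scores = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["a photo of cats", "a photo of dogs"],
    )
    print(scores)  # list of {"score": ..., "label": ...} sorted by score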
| 123 | 1 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
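# The one-liner above is a quine: %r re-inserts the template string with
# quotes and escaping intact, and %% collapses back to a single %, so the
# printed text is exactly the source line. Equivalent two-step form:
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)  # prints the original one-liner verbatim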
| 332 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()
device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
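# Design note: passing sample_input lets ipex.optimize() trace the UNet with
# concrete shapes (latents of 2x4x64x64 plus 77-token text embeddings above);
# when tracing fails, the except branch falls back to shape-agnostic BF16
# optimization, which is why the try/except is there.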
| 332 | 1 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = PhobertTokenizer
test_rust_tokenizer = False
def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "l à</w>"]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
def get_tokenizer( self , **kwargs ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
input_text = "Tôi là VinAI Research"
output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def test_full_tokenizer( self ):
tokenizer = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = "Tôi là VinAI Research"
bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
tokens = tokenizer.tokenize(text )
print(tokens )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 61 |
"""simple docstring"""
def remove_duplicates( key: str ) -> str:
key_no_dups = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def create_cipher_map( key: str ) -> dict[str, str]:
alphabet = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
key = remove_duplicates(key.upper() )
offset = len(key )
# First fill cipher with key characters
cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(cipher_alphabet ) , 26 ):
char = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
char = alphabet[i - offset]
cipher_alphabet[alphabet[i]] = char
return cipher_alphabet
def encipher( message: str , cipher_map: dict[str, str] ) -> str:
return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message: str , cipher_map: dict[str, str] ) -> str:
# Reverse our cipher mappings
rev_cipher_map = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main() -> None:
message = input("""Enter message to encode or decode: """ ).strip()
key = input("""Enter keyword: """ ).strip()
option = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
func = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
cipher_map = create_cipher_map(key )
print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
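# Round-trip sketch (key chosen arbitrarily): encipher and decipher invert
# each other because create_cipher_map builds a bijection over A-Z, while
# non-alphabetic characters pass through unchanged.
#
# cipher_map = create_cipher_map("Corona")
# secret = encipher("Hello World!!", cipher_map)
# assert decipher(secret, cipher_map) == "HELLO WORLD!!"  # input is upper-cased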
| 532 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy( preds , labels ):
return float((preds == labels).mean() )
def acc_and_f1( preds , labels , f1_avg="binary" ):
acc = simple_accuracy(preds , labels )
f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
return {
"accuracy": acc,
"f1": f1,
}
def evaluate_multirc( ids_preds , labels ):
question_map = {}
for id_pred, label in zip(ids_preds , labels ):
question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
pred = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
question_map[question_id] = [(pred, label)]
f1s, ems = [], []
for question, preds_labels in question_map.items():
question_preds, question_labels = zip(*preds_labels )
f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
f1s.append(f1 )
em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
ems.append(em )
f1_m = float(sum(f1s ) / len(f1s ) )
em = sum(ems ) / len(ems )
f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue( datasets.Metric ):
def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(self._get_feature_types() ),codebase_urls=[],reference_urls=[],format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None,)
def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def _compute( self , predictions , references ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(references , predictions )}
elif self.config_name == "cb":
return acc_and_f1(predictions , references , f1_avg='macro' )
elif self.config_name == "record":
dataset = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(dataset , predictions )[0]
elif self.config_name == "multirc":
return evaluate_multirc(predictions , references )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 707 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
args = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
sep = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map['cls_token'] # `<s>`
sep = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
sep = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
data = fp.readlines()
logger.info('Start encoding' )
logger.info(f"{len(data)} examples to process." )
rslt = []
iter = 0
interval = 10000
start = time.time()
for text in data:
text = f"{bos} {text.strip()} {sep}"
token_ids = tokenizer.encode(text , add_special_tokens=False )
rslt.append(token_ids )
iter += 1
if iter % interval == 0:
end = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
start = time.time()
logger.info('Finished binarization' )
logger.info(f"{len(data)} examples processed." )
dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
vocab_size = tokenizer.vocab_size
if vocab_size < (1 << 16):
rslt_ = [np.uint16(d ) for d in rslt]
else:
rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(dp_file , 'wb' ) as handle:
pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
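# Read-back sketch mirroring the writer above (the path follows the
# f"{dump_file}.{tokenizer_name}.pickle" pattern; adjust it to your run).
#
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)
# # Each entry is a small-dtype numpy array of token ids: uint16 suffices
# # whenever vocab_size < 2**16, halving memory versus int32.
# print(len(sequences), sequences[0].dtype)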
| 212 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def get_dummy_components( self ):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
prior_tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
prior = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0 )
prior_scheduler = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
image_noising_scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
unet = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
vae = AutoencoderKL()
components = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def test_attention_slicing_forward_pass( self ):
test_max_difference = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def test_inference_batch_single_identical( self ):
test_max_difference = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip( self ):
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
output = pipe("""anime turle""" , generator=generator , output_type="""np""" )
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 548 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = False, False, False
@dataclass
class Audio:
"""Audio feature to extract audio data from an audio file."""
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
_type: str = field(default="""Audio""" , init=False , repr=False )
def __call__( self ):
return self.pa_type
def encode_example( self , value: Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(value , str ):
return {"bytes": None, "path": value}
elif isinstance(value , bytes ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
bytes_value = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
bytes_value = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32767
buffer = BytesIO(bytes() )
sf.write(buffer , bytes_value , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.''' )
def decode_example( self , value: dict , token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
path, file = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of 'path' or 'bytes' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("""::""" )[-1]
try:
repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
use_auth_token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
use_auth_token = None
with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
array, sampling_rate = sf.read(f )
else:
array, sampling_rate = sf.read(file )
array = array.T
if self.mono:
array = librosa.to_mono(array )
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def cast_storage( self , storage: Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
path_array = pa.array([None] * len(storage ) , type=pa.string() )
storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
bytes_array = storage.field("""bytes""" )
else:
bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
path_array = storage.field("""path""" )
else:
path_array = pa.array([None] * len(storage ) , type=pa.string() )
storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(storage , self.pa_type )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(snake_case_ : Optional[int] ):
with xopen(snake_case_ , """rb""" ) as f:
UpperCamelCase_: Optional[int] = f.read()
return bytes_
UpperCamelCase_: int = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCamelCase_: Union[str, Any] = pa.array(
[os.path.basename(snake_case_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
UpperCamelCase_: Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(snake_case_ , self.pa_type )
| 548 | 1 |
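# A de-obfuscated sketch of the WAV round-trip performed by the Audio feature methods
# above: encode raw samples to in-memory WAV bytes, then decode them back.  It assumes
# the `soundfile` dependency that `decode_example` already requires; all names below
# are illustrative, not part of the original API.
from io import BytesIO

import numpy as np
import soundfile as sf


def wav_roundtrip_sketch() -> None:
    sampling_rate = 16_000
    samples = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)
    buffer = BytesIO()
    sf.write(buffer, samples, sampling_rate, format="wav")  # mirrors encode_example
    decoded, rate = sf.read(BytesIO(buffer.getvalue()))  # mirrors decode_example
    assert rate == sampling_rate
    assert np.allclose(decoded, samples, atol=1e-4)  # WAV defaults to PCM_16, so only tiny quantization error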
"""simple docstring"""
UpperCamelCase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase_ = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
assert len(str(UpperCAmelCase ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
a_ = year // 100
a_ = (5 * (century % 4) + 2) % 7
a_ = year % 100
a_ = centurian % 12
a_ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
    a_ = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
a_ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 210 |
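# A readable, runnable equivalent of the Doomsday routine above (the stored version
# assigns to placeholder names such as `a_`, so it cannot run as written):
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {0: "Sunday", 1: "Monday", 2: "Tuesday", 3: "Wednesday",
                  4: "Thursday", 5: "Friday", 6: "Saturday"}


def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100  # last two digits of the year
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    return WEEK_DAY_NAMES[(dooms_day + day - day_anchor) % 7]


# get_week_day(2022, 1, 1) -> "Saturday"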
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class snake_case :
a_ : List[str]
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default="""Translation""" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self) ->Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def UpperCAmelCase__ ( self) ->Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class snake_case :
a_ : Optional[List] = None
a_ : Optional[int] = None
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default="""TranslationVariableLanguages""" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = sorted(set(self.languages)) if self.languages else None
a_ = len(self.languages) if self.languages else None
def __call__( self) ->Any:
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int:
a_ = set(self.languages)
if self.languages and set(__UpperCAmelCase) - lang_set:
raise ValueError(
F'''Some languages in example ({", ".join(sorted(set(__UpperCAmelCase) - lang_set))}) are not in valid set ({", ".join(__UpperCAmelCase)}).''')
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ = []
for lang, text in translation_dict.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
a_ , a_ = zip(*sorted(__UpperCAmelCase))
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self) ->Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
} | 210 | 1 |
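# A usage sketch for the two translation feature types above: the fixed-language
# variant stores exactly one string per language, while the variable-language variant
# also accepts several strings for one language and flattens them into sorted,
# aligned `language`/`translation` lists (the definitions above are stored under
# placeholder class names; the upstream `datasets` names are used here):
#
#   feat = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feat.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   # -> {"language": ("de", "en", "fr", "fr"),
#   #     "translation": ("die katze", "the cat", "la chatte", "le chat")}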
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
SCREAMING_SNAKE_CASE_ : Tuple = len(snake_case__ ) - 1
def snake_case ( self ,snake_case__ ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
SCREAMING_SNAKE_CASE_ : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree ,snake_case__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(snake_case__ ) ,5 ) == 1
return output_values
def snake_case ( self ,snake_case__ ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
SCREAMING_SNAKE_CASE_ : int = self.basis_function(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = 0.0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def snake_case ( self ,snake_case__ = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
SCREAMING_SNAKE_CASE_ : list[float] = [] # x coordinates of points to plot
SCREAMING_SNAKE_CASE_ : list[float] = [] # y coordinates of points to plot
SCREAMING_SNAKE_CASE_ : List[str] = 0.0
while t <= 1:
SCREAMING_SNAKE_CASE_ : int = self.bezier_curve_function(snake_case__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
SCREAMING_SNAKE_CASE_ : Dict = [i[0] for i in self.list_of_points]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
snake_case__ ,snake_case__ ,color='blue' ,label='Curve of Degree ' + str(self.degree ) ,)
plt.scatter(snake_case__ ,snake_case__ ,color='red' ,label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 105 |
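# A quick numeric check of the curve class above (no plotting), using the class and
# method names that the demo calls above already assume.  For a degree-1 curve the
# basis functions are (1 - t, t), so the point at t = 0.5 is the midpoint of the two
# control points:
curve = BezierCurve([(1, 1), (3, 5)])
assert curve.basis_function(0.5) == [0.5, 0.5]
assert curve.bezier_curve_function(0.5) == (2.0, 3.0)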
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
super().__init__(*snake_case__ ,**snake_case__ )
self.check_model_type(snake_case__ )
def snake_case ( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}, {}
if padding is not None:
SCREAMING_SNAKE_CASE_ : Any = padding
if truncation is not None:
SCREAMING_SNAKE_CASE_ : Tuple = truncation
if top_k is not None:
SCREAMING_SNAKE_CASE_ : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,snake_case__ ,snake_case__ = None ,**snake_case__ ):
if isinstance(snake_case__ ,(Image.Image, str) ) and isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'image': image, 'question': question}
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = image
SCREAMING_SNAKE_CASE_ : List[Any] = super().__call__(snake_case__ ,**snake_case__ )
return results
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : List[str] = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(
inputs['question'] ,return_tensors=self.framework ,padding=snake_case__ ,truncation=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.image_processor(images=snake_case__ ,return_tensors=self.framework )
model_inputs.update(snake_case__ )
return model_inputs
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model(**snake_case__ )
return model_outputs
def snake_case ( self ,snake_case__ ,snake_case__=5 ):
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ : Any = model_outputs.logits.sigmoid()[0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = probs.topk(snake_case__ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = scores.tolist()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case__ ,snake_case__ )]
| 105 | 1 |
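# A minimal end-user sketch for the pipeline above (the checkpoint name is one common
# public VQA model, not something this file pins down):
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(image="photo.jpg", question="How many cats are there?", top_k=2)
# -> a list of {"score": float, "answer": str} dicts, as built in `postprocess` above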
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __snake_case ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowercase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
def __a ( self : str ):
"""simple docstring"""
return {}
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
SCREAMING_SNAKE_CASE__ = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = MarkupLMFeatureExtractor if is_bsa_available() else None
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MarkupLMFeatureExtractionTester(self )
@property
def __a ( self : Union[str, Any] ):
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class()
# Test not batched input
SCREAMING_SNAKE_CASE__ = get_html_strings()[0]
SCREAMING_SNAKE_CASE__ = feature_extractor(_lowercase )
# fmt: off
SCREAMING_SNAKE_CASE__ = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
SCREAMING_SNAKE_CASE__ = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , _lowercase )
self.assertEqual(encoding.xpaths , _lowercase )
# Test batched
SCREAMING_SNAKE_CASE__ = get_html_strings()
SCREAMING_SNAKE_CASE__ = feature_extractor(_lowercase )
# fmt: off
SCREAMING_SNAKE_CASE__ = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
SCREAMING_SNAKE_CASE__ = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _lowercase )
self.assertEqual(encoding.xpaths , _lowercase )
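# Outside the test, the extractor is typically applied straight to raw HTML; a minimal
# sketch (assumes `bs4` is installed, and uses the public transformers import):
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
# encoding.nodes -> [["Hello"]]; encoding.xpaths -> [["/html/body/h1"]]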
| 379 | # Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> int:
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=7_00_00 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = np.dot(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = sigmoid_function(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ = np.dot(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = sigmoid_function(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_00 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
__lowerCamelCase : List[Any] = datasets.load_iris()
__lowerCamelCase : List[Any] = iris.data[:, :2]
__lowerCamelCase : Dict = (iris.target != 0) * 1
__lowerCamelCase : List[str] = 0.1
__lowerCamelCase : str = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCamelCase) , (__lowerCamelCase)) : int = (x[:, 0].min(), x[:, 0].max())
((__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = (x[:, 1].min(), x[:, 1].max())
((__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCamelCase : Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCamelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 379 | 1 |
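# The update above is plain batch gradient descent on the cross-entropy loss:
# grad = x.T @ (sigmoid(x @ theta) - y) / n, exactly as computed in `logistic_reg`.
# A finite-difference sanity check of that gradient (illustrative; it calls
# `sigmoid_function` / `cost_function` by the names the training loop above uses,
# and assumes the (h, y) argument order for the cost):
def numerical_gradient_check() -> None:
    rng = np.random.default_rng(0)
    xs = rng.normal(size=(20, 2))
    ys = (xs[:, 0] + xs[:, 1] > 0).astype(float)
    theta = rng.normal(size=2)
    eps = 1e-6
    analytic = np.dot(xs.T, sigmoid_function(np.dot(xs, theta)) - ys) / ys.size
    for j in range(2):
        step = np.zeros(2)
        step[j] = eps
        loss_hi = cost_function(sigmoid_function(np.dot(xs, theta + step)), ys)
        loss_lo = cost_function(sigmoid_function(np.dot(xs, theta - step)), ys)
        assert abs((loss_hi - loss_lo) / (2 * eps) - analytic[j]) < 1e-4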
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
while a != 0:
__lowercase ,__lowercase : Tuple = b % a, a
return b
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if gcd(__UpperCamelCase , __UpperCamelCase ) != 1:
__lowercase : Union[str, Any] = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__UpperCamelCase )
__lowercase ,__lowercase ,__lowercase : str = 1, 0, a
__lowercase ,__lowercase ,__lowercase : str = 0, 1, m
while va != 0:
__lowercase : Union[str, Any] = ua // va
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 76 |
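# A worked check of the extended-Euclid inverse above: 3 * 4 = 12 = 1 (mod 11), so
# the inverse of 3 modulo 11 is 4, and since Python 3.8 the built-in pow agrees
# (the functions above are stored under placeholder names; readable names assumed):
#
#   mod_inverse(3, 11) == 4      # extended Euclid
#   pow(3, -1, 11) == 4          # built-in equivalent
#   mod_inverse(4, 8)            # raises ValueError: gcd(4, 8) != 1, no inverse exists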
"""simple docstring"""
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__snake_case = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
__snake_case = str(bin(SCREAMING_SNAKE_CASE ) )[2:]
__snake_case = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163 | 0 |
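# The string construction above agrees with Python's built-in bitwise OR, e.g.
# (assuming the function above under a readable name such as binary_or):
#
#   binary_or(25, 32) == "0b111001" == bin(25 | 32)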
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
    print('\nThe shortest-path matrix using the Floyd-Warshall algorithm\n' )
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
a__ = [[float('inf' ) for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
a__ = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCAmelCase ):
# looping through rows of graph array
for i in range(__lowerCAmelCase ):
# looping through columns of graph array
for j in range(__lowerCAmelCase ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
a__ = dist[i][k] + dist[k][j]
_print_dist(__lowerCAmelCase , __lowerCAmelCase )
return dist, v
if __name__ == "__main__":
snake_case = int(input('''Enter number of vertices: '''))
snake_case = int(input('''Enter number of edges: '''))
snake_case = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
snake_case = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
snake_case = int(input('''Enter source:'''))
snake_case = int(input('''Enter destination:'''))
snake_case = float(input('''Enter weight:'''))
snake_case = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 707 |
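# A small programmatic example for the solver above (the driver code already calls it
# under the name floyd_warshall), bypassing the interactive input() loop:
#
#   INF = float("inf")
#   graph = [[0.0, 3.0, INF],
#            [INF, 0.0, 1.0],
#            [2.0, INF, 0.0]]
#   floyd_warshall(graph, 3)
#
# prints the all-pairs matrix; the shortest 0 -> 2 route goes through vertex 1
# (weight 3 + 1 = 4), and 1 -> 0 goes through vertex 2 (weight 1 + 2 = 3).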
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
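# A hollow square lamina with outer width o and hole width h (same parity,
# 1 <= h <= o - 2) uses o**2 - h**2 tiles; the loop above counts the (o, h) pairs in
# closed form.  A direct brute-force count for small limits (illustrative) that the
# closed form should reproduce, e.g. both give 1 for limit = 8 (only the 3x3 lamina):
def brute_force_count(limit: int) -> int:
    count = 0
    for outer in range(3, limit):
        hole = outer - 2  # widest hole first, i.e. fewest tiles
        while hole > 0 and outer**2 - hole**2 <= limit:
            count += 1
            hole -= 2
    return count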
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
UpperCamelCase = ["""pixel_values"""]
def __init__( self :Optional[int] , __snake_case :bool = True , __snake_case :Union[int, float] = 1 / 2_55 , __snake_case :bool = True , __snake_case :int = 8 , **__snake_case :int , ):
'''simple docstring'''
super().__init__(**__snake_case )
__magic_name__ : Optional[Any] =do_rescale
__magic_name__ : List[Any] =rescale_factor
__magic_name__ : Dict =do_pad
__magic_name__ : Tuple =pad_size
def A__ ( self :List[str] , __snake_case :np.ndarray , __snake_case :float , __snake_case :Optional[Union[str, ChannelDimension]] = None , **__snake_case :Tuple ):
'''simple docstring'''
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def A__ ( self :List[Any] , __snake_case :np.ndarray , __snake_case :int , __snake_case :Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Optional[int] =get_image_size(__snake_case )
__magic_name__ : List[Any] =(old_height // size + 1) * size - old_height
__magic_name__ : Union[str, Any] =(old_width // size + 1) * size - old_width
return pad(__snake_case , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=__snake_case )
def A__ ( self :Union[str, Any] , __snake_case :ImageInput , __snake_case :Optional[bool] = None , __snake_case :Optional[float] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[int] = None , __snake_case :Optional[Union[str, TensorType]] = None , __snake_case :Union[str, ChannelDimension] = ChannelDimension.FIRST , **__snake_case :Tuple , ):
'''simple docstring'''
__magic_name__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : str =rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : Dict =do_pad if do_pad is not None else self.do_pad
__magic_name__ : Union[str, Any] =pad_size if pad_size is not None else self.pad_size
__magic_name__ : int =make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__magic_name__ : Optional[int] =[to_numpy_array(__snake_case ) for image in images]
if do_rescale:
__magic_name__ : Any =[self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_pad:
__magic_name__ : Optional[Any] =[self.pad(__snake_case , size=__snake_case ) for image in images]
__magic_name__ : Any =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__magic_name__ : Dict ={"""pixel_values""": images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 21 |
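# The `pad` step above grows each spatial dimension to the next multiple of
# `pad_size` (default 8) using symmetric padding; a standalone numpy sketch of the
# same arithmetic (illustrative, 2-D single-channel case only):
import numpy as np


def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    height, width = image.shape[:2]
    pad_height = (height // size + 1) * size - height
    pad_width = (width // size + 1) * size - width
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")


# pad_to_multiple(np.zeros((30, 50))).shape == (32, 56)
# note: an exact multiple still gains one full block, matching the formula above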
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
def a ( self ):
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = 5
# Realm tok
_UpperCamelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(A_ , exist_ok=A_ )
_UpperCamelCase = os.path.join(A_ , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(A_ , exist_ok=A_ )
def a ( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def a ( self ):
shutil.rmtree(self.tmpdirname )
def a ( self ):
_UpperCamelCase = RealmConfig(num_block_records=self.num_block_records )
return config
def a ( self ):
_UpperCamelCase = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def a ( self ):
_UpperCamelCase = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=A_ , )
return block_records
def a ( self ):
_UpperCamelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def a ( self ):
_UpperCamelCase = self.get_config()
_UpperCamelCase = self.get_dummy_retriever()
_UpperCamelCase = retriever.tokenizer
_UpperCamelCase = np.array([0, 3] , dtype="long" )
_UpperCamelCase = tokenizer(["Test question"] ).input_ids
_UpperCamelCase = tokenizer(
["the fourth"] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
_UpperCamelCase = config.reader_seq_len
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors="np" )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def a ( self ):
_UpperCamelCase = self.get_config()
_UpperCamelCase = self.get_dummy_retriever()
_UpperCamelCase = retriever.tokenizer
_UpperCamelCase = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase = tokenizer(["Test question"] ).input_ids
_UpperCamelCase = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
_UpperCamelCase = config.reader_seq_len
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors="np" )
self.assertEqual([False, True, True] , A_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A_ )
def a ( self ):
_UpperCamelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 138 | 0 |
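# The retriever under test wires a tokenizer to a numpy array of raw text blocks:
# given question ids and candidate block ids it builds "[CLS] question [SEP] block
# [SEP]" concatenations and, when `answer_ids` is supplied, marks which candidate
# blocks contain the answer span (the has_answers / start_pos / end_pos values the
# asserts above check).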
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
'''simple docstring'''
_lowercase =None
_lowercase =None
_lowercase =graph
self._normalize_graph(lowerCAmelCase , lowerCAmelCase )
_lowercase =len(lowerCAmelCase )
_lowercase =None
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Any:
'''simple docstring'''
if sources is int:
_lowercase =[sources]
if sinks is int:
_lowercase =[sinks]
if len(lowerCAmelCase ) == 0 or len(lowerCAmelCase ) == 0:
return
_lowercase =sources[0]
_lowercase =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowerCAmelCase ) > 1 or len(lowerCAmelCase ) > 1:
_lowercase =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_lowercase =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_lowercase =max_input_flow
_lowercase =0
_lowercase =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_lowercase =max_input_flow
_lowercase =size - 1
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
            raise Exception('You need to set a maximum flow algorithm first.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
_lowercase =algorithm(self )
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase ) -> int:
'''simple docstring'''
_lowercase =flow_network
_lowercase =flow_network.verticesCount
_lowercase =flow_network.sourceIndex
_lowercase =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_lowercase =flow_network.graph
_lowercase =False
def A__ ( self ) -> str:
'''simple docstring'''
if not self.executed:
self._algorithm()
_lowercase =True
def A__ ( self ) -> int:
'''simple docstring'''
pass
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
# use this to save your result
_lowercase =-1
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.executed:
            raise Exception('You should execute the algorithm before using its result!' )
return self.maximum_flow
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =[[0] * self.verticies_count for i in range(self.verticies_count )]
_lowercase =[0] * self.verticies_count
_lowercase =[0] * self.verticies_count
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_lowercase =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_lowercase =0
while i < len(lowerCAmelCase ):
_lowercase =vertices_list[i]
_lowercase =self.heights[vertex_index]
self.process_vertex(lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowerCAmelCase ) )
_lowercase =0
else:
i += 1
_lowercase =sum(self.preflow[self.source_index] )
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowerCAmelCase , lowerCAmelCase )
self.relabel(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowercase =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowercase =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_lowercase =self.heights[to_index]
if min_height is not None:
_lowercase =min_height + 1
if __name__ == "__main__":
lowercase_ = [0]
lowercase_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase_ = flow_network.find_maximum_flow()
print(f"maximum flow is {maximum_flow}")
| 380 |
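# For the 4-vertex network above the only source-to-sink route is 0 -> 1 -> 2 -> 3
# with capacities 7, 6 and 8, so the bottleneck (and the printed maximum flow) is 6.
# Push-relabel keeps a preflow (vertices may temporarily hold excess) plus integer
# heights; excess is pushed only "downhill" to a strictly lower neighbour with
# residual capacity, and a stuck vertex is relabeled to one above its lowest such
# neighbour, as implemented in `process_vertex` / `relabel` above.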
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def a ( A__ : Tuple ) -> Any:
"""simple docstring"""
_lowercase =model.config
_lowercase =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowercase =MBartConfig(
is_decoder=A__ , is_encoder_decoder=A__ , add_cross_attention=A__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=A__ , add_final_layer_norm=A__ , )
return encoder_config, decoder_config
def a ( A__ : Union[str, Any] ) -> str:
"""simple docstring"""
if "encoder.model" in name:
_lowercase =name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
_lowercase =name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
_lowercase =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase =name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
_lowercase ='encoder.' + name
if "attn.proj" in name:
_lowercase =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
_lowercase =name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowercase =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowercase =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
_lowercase ='encoder.layernorm.weight'
if name == "encoder.norm.bias":
_lowercase ='encoder.layernorm.bias'
return name
def a ( A__ : Optional[Any] , A__ : List[Any] ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowercase =orig_state_dict.pop(A__ )
if "qkv" in key:
_lowercase =key.split('.' )
_lowercase =int(key_split[3] )
_lowercase =int(key_split[5] )
_lowercase =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowercase =val[:dim, :]
_lowercase =val[dim : dim * 2, :]
_lowercase =val[-dim:, :]
else:
_lowercase =val[:dim]
_lowercase =val[dim : dim * 2]
_lowercase =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowercase =val
return orig_state_dict
def a ( A__ : str , A__ : List[str]=None , A__ : List[Any]=False ) -> List[str]:
"""simple docstring"""
_lowercase =DonutModel.from_pretrained(A__ ).eval()
# load HuggingFace model
_lowercase , _lowercase =get_configs(A__ )
_lowercase =DonutSwinModel(A__ )
_lowercase =MBartForCausalLM(A__ )
_lowercase =VisionEncoderDecoderModel(encoder=A__ , decoder=A__ )
model.eval()
_lowercase =original_model.state_dict()
_lowercase =convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
# verify results on scanned document
_lowercase =load_dataset('hf-internal-testing/example-documents' )
_lowercase =dataset['test'][0]['image'].convert('RGB' )
_lowercase =XLMRobertaTokenizerFast.from_pretrained(A__ , from_slow=A__ )
_lowercase =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowercase =DonutProcessor(A__ , A__ )
_lowercase =processor(A__ , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowercase ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
_lowercase ='When is the coffee break?'
_lowercase =task_prompt.replace('{user_input}' , A__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowercase ='<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowercase ='<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _lowercase ='<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowercase ='<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowercase ='hello world'
else:
raise ValueError('Model name not supported' )
_lowercase =original_model.decoder.tokenizer(A__ , add_special_tokens=A__ , return_tensors='pt' )[
'input_ids'
]
_lowercase =original_model.encoder.model.patch_embed(A__ )
_lowercase , _lowercase =model.encoder.embeddings(A__ )
assert torch.allclose(A__ , A__ , atol=1e-3 )
# verify encoder hidden states
_lowercase =original_model.encoder(A__ )
_lowercase =model.encoder(A__ ).last_hidden_state
assert torch.allclose(A__ , A__ , atol=1e-2 )
# verify decoder hidden states
_lowercase =original_model(A__ , A__ , A__ ).logits
_lowercase =model(A__ , decoder_input_ids=A__ ).logits
assert torch.allclose(A__ , A__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
processor.save_pretrained(A__ )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
lowercase_ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 380 | 1 |
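# After conversion, the checkpoint is driven through the standard
# VisionEncoderDecoder generate loop; a minimal DocVQA sketch (checkpoint name and
# prompt format as in the script above; `image` is any PIL image, `max_length` is an
# illustrative choice):
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")


def answer(image, question: str) -> str:
    prompt = f"<s_docvqa><s_question>{question}</s_question><s_answer>"
    pixel_values = processor(image, return_tensors="pt").pixel_values
    decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
    outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=128)
    return processor.batch_decode(outputs)[0]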
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Optional[int] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 116 | 0 |
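# Both variants above implement the same lazy-import pattern used throughout
# transformers: at import time only `_import_structure` (submodule -> exported names)
# is built, and `_LazyModule` takes the package's place in sys.modules, importing a
# submodule only on first attribute access; under `TYPE_CHECKING` the real imports
# run instead, so static type checkers and IDEs still see the concrete symbols.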
def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
snake_case__ : Any =(path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
snake_case__, snake_case__ : Optional[int] =True, True
snake_case__ : List[str] =dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return path
def lowercase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : Tuple =0
snake_case__ : List[Any] =-1
for i in range(_UpperCamelCase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
snake_case__ : Optional[int] =i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def lowercase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
snake_case__ : List[Any] =[[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
snake_case__, snake_case__ : Union[str, Any] =check_circuit_or_path(_UpperCamelCase , _UpperCamelCase )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
snake_case__ : List[Any] =1
if check == 2:
snake_case__ : int =odd_node
        print('''graph has an Euler path''' )
    if check == 1:
        print('''graph has an Euler cycle''' )
snake_case__ : Tuple =dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
print(_UpperCamelCase )
def lowercase_ ( ):
"""simple docstring"""
snake_case__ : Optional[Any] ={1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
snake_case__ : Dict ={1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
snake_case__ : Tuple ={1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
snake_case__ : Tuple ={1: [2, 3], 2: [1, 3], 3: [1, 2]}
snake_case__ : List[str] ={
1: [],
2: []
# all degree is zero
}
snake_case__ : Union[str, Any] =10
check_euler(_UpperCamelCase , _UpperCamelCase )
check_euler(_UpperCamelCase , _UpperCamelCase )
check_euler(_UpperCamelCase , _UpperCamelCase )
check_euler(_UpperCamelCase , _UpperCamelCase )
check_euler(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 721 |
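# What the checks above report for the sample graphs in main():
#   G1 has exactly two odd-degree vertices (1 and 5)   -> "graph has an Euler path"
#   G2, G4 (and the empty G5) are all even-degree      -> "graph has an Euler cycle"
#   G3 has four odd-degree vertices (1, 2, 4, 5)       -> "graph is not Eulerian"
# An Euler path needs 0 or 2 odd-degree vertices; with exactly 2, any traversal must
# start at one odd vertex and end at the other, which is why dfs starts at odd_node.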
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ ='''openai-gpt'''
lowerCAmelCase__ ={
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4_0478 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE="cls_index" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[Any] =vocab_size
snake_case__ : Optional[int] =n_positions
snake_case__ : Union[str, Any] =n_embd
snake_case__ : Dict =n_layer
snake_case__ : Dict =n_head
snake_case__ : Optional[int] =afn
snake_case__ : Tuple =resid_pdrop
snake_case__ : str =embd_pdrop
snake_case__ : Tuple =attn_pdrop
snake_case__ : Optional[int] =layer_norm_epsilon
snake_case__ : Any =initializer_range
snake_case__ : List[str] =summary_type
snake_case__ : Dict =summary_use_proj
snake_case__ : Any =summary_activation
snake_case__ : Optional[Any] =summary_first_dropout
snake_case__ : Tuple =summary_proj_to_labels
super().__init__(**__SCREAMING_SNAKE_CASE )
| 408 | 0 |
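# A minimal instantiation sketch for the config above (sizes are illustrative):
from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_layer=2, n_head=2, n_embd=128)
model = OpenAIGPTModel(config)
# the attribute_map above makes `config.hidden_size` an alias for `config.n_embd`
assert config.hidden_size == 128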
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
a_ : Union[str, Any] = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def a_ ( __snake_case : Dict , __snake_case : Dict ) -> Dict:
"""simple docstring"""
inspect_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCamelCase_ =path + ".py"
assert script_name in os.listdir(lowerCAmelCase__ )
assert "__pycache__" not in os.listdir(lowerCAmelCase__ )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def a_ ( __snake_case : str , __snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
inspect_metric(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCamelCase_ =path + ".py"
assert script_name in os.listdir(lowerCAmelCase__ )
assert "__pycache__" not in os.listdir(lowerCAmelCase__ )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def a_ ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ =get_dataset_config_info(lowerCAmelCase__ , config_name=lowerCAmelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Dict ) -> int:
"""simple docstring"""
with pytest.raises(lowerCAmelCase__ ):
get_dataset_config_info(lowerCAmelCase__ , config_name=lowerCAmelCase__ )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def a_ ( __snake_case : Tuple , __snake_case : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =get_dataset_config_names(lowerCAmelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def a_ ( __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =get_dataset_infos(lowerCAmelCase__ )
assert list(infos.keys() ) == expected_configs
lowerCamelCase_ =expected_configs[0]
assert expected_config in infos
lowerCamelCase_ =infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def a_ ( __snake_case : int , __snake_case : List[Any] , __snake_case : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =get_dataset_infos(lowerCAmelCase__ )
assert expected_config in infos
lowerCamelCase_ =infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def a_ ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : Tuple ) -> Dict:
"""simple docstring"""
with pytest.raises(lowerCAmelCase__ ):
get_dataset_split_names(lowerCAmelCase__ , config_name=lowerCAmelCase__ )
| 676 |
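# The helpers exercised above double as the quickest interactive way to explore a Hub
# dataset, with outputs matching the parametrized expectations, e.g.:
#
#   get_dataset_config_names("squad")                 # contains "plain_text"
#   get_dataset_split_names("squad", "plain_text")    # ["train", "validation"]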
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 642 | 0 |
"""simple docstring"""
import math
import qiskit
def UpperCAmelCase_ ( __a : int = 1 , __a : int = 1 , __a : int = 1 ):
'''simple docstring'''
if (
isinstance(__a , __a )
or isinstance(__a , __a )
or isinstance(__a , __a )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(__a ) != input_a)
or (math.floor(__a ) != input_a)
or (math.floor(__a ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('inputs must be less than or equal to 2.' )
# build registers
_lowerCamelCase : List[str] = qiskit.QuantumRegister(4 , 'qr' )
_lowerCamelCase : Optional[Any] = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
_lowerCamelCase : int = [input_a, input_a, carry_in]
_lowerCamelCase : str = qiskit.QuantumCircuit(__a , __a )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__a ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__a ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__a ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __a ) # measure the last two qbits
_lowerCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
_lowerCamelCase : Tuple = qiskit.execute(__a , __a , shots=10_00 )
return job.result().get_counts(__a )
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 701 |
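# Qubits 0-2 carry the two inputs and the carry-in; the Toffoli/CNOT sequence leaves
# the sum (a XOR b XOR c) on qubit 2 and the carry-out on qubit 3, which are the two
# bits measured into `cr`.  An input of 2 puts that qubit into superposition via the
# Hadamard gate.  For the demo call quantum_full_adder(1, 1, 1): 1 + 1 + 1 = 3, i.e.
# sum = 1 and carry = 1, so all 1000 shots should land in a single basis state
# (counts like {'11': 1000}).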
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A_(unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : List[str] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(A , range(len(A ) ) ) )
_lowerCamelCase : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_lowerCamelCase : Optional[Any] = {'unk_token': '<unk>'}
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
_lowerCamelCase : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(A , A )
def _lowerCAmelCase ( self , **A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self , **A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self , **A ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCamelCase : List[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : str = CLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCamelCase : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_lowerCamelCase : Dict = CLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCamelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : str = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCamelCase : str = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_lowerCamelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Any = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : str = self.prepare_image_inputs()
_lowerCamelCase : List[Any] = image_processor(A , return_tensors='np' )
_lowerCamelCase : int = processor(images=A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Dict = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : Optional[Any] = 'lower newer'
_lowerCamelCase : Union[str, Any] = processor(text=A )
_lowerCamelCase : Tuple = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ):
_lowerCamelCase : str = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : Any = 'lower newer'
_lowerCamelCase : Any = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[int] = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : int = processor.batch_decode(A )
_lowerCamelCase : Dict = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : int = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : str = 'lower newer'
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
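# Minimal end-user sketch for the processor under test (editor addition; the
# checkpoint name is an assumption -- "openai/clip-vit-base-patch32" is the usual
# public CLIP checkpoint -- and downloading it requires network access):
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.open("cat.png")  # hypothetical local file
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#   print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']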
| 349 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """Parse common truthy/falsy strings into a bool (argparse-friendly)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map the string form of each choice back to the choice itself."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
):
    """Convenience wrapper around dataclasses.field for declaring parser arguments."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        """Create the parser and register one argument per dataclass field."""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        """Instantiate each registered dataclass type from a flat dict of values."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        """Parse a JSON file into instances of the registered dataclass types."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        """Parse a YAML file into instances of the registered dataclass types."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
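# Minimal usage sketch for the parser above (editor addition; the dataclass and
# its fields are hypothetical, the method names follow the definitions above):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class ExampleArguments:
#       learning_rate: float = HfArg(default=5e-5, help="Optimizer step size.")
#       do_train: bool = HfArg(default=False, help="Whether to run training.")
#
#   parser = HfArgumentParser(ExampleArguments)
#   (example_args,) = parser.parse_args_into_dataclasses(
#       args=["--learning_rate", "1e-4", "--do_train"]
#   )
#   print(example_args)  # ExampleArguments(learning_rate=0.0001, do_train=True)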
| 563 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 563 | 1 |
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order pair, then step back to re-check
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
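    # Illustrative checks (editor addition): gnome sort is O(n^2), comparable to
    # insertion sort; the "gnome" walks back one step after each swap.
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([-5, 0, 5]) == [-5, 0, 5]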
| 709 |
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

# The alias names below are descriptive reconstructions; the originals were lost.
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
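# Illustrative use of the aliases above (editor addition; the function is hypothetical):
def normalize_paths(paths: ListLike) -> List[Union[str, bytes]]:
    """Coerce a list/tuple of path-likes into plain str/bytes paths."""
    return [os.fspath(p) for p in paths]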
| 473 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__( self : Optional[int] , __snake_case : int , __snake_case : str=1_4 , __snake_case : Any=7 , __snake_case : Dict=True , __snake_case : Union[str, Any]=True , __snake_case : List[str]=True , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : int=9_9 , __snake_case : Tuple=3_2 , __snake_case : List[str]=5 , __snake_case : Optional[Any]=4 , __snake_case : Optional[int]=3_7 , __snake_case : Optional[int]="gelu" , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=5_1_2 , __snake_case : List[Any]=1_6 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : int=4 , __snake_case : Dict=None , ) -> List[str]:
__magic_name__: Tuple = parent
__magic_name__: int = batch_size
__magic_name__: str = seq_length
__magic_name__: Optional[int] = is_training
__magic_name__: Dict = use_token_type_ids
__magic_name__: str = use_input_mask
__magic_name__: int = use_labels
__magic_name__: Union[str, Any] = use_mc_token_ids
__magic_name__: str = vocab_size
__magic_name__: List[str] = hidden_size
__magic_name__: Any = num_hidden_layers
__magic_name__: int = num_attention_heads
__magic_name__: Dict = intermediate_size
__magic_name__: Optional[Any] = hidden_act
__magic_name__: Any = hidden_dropout_prob
__magic_name__: Dict = attention_probs_dropout_prob
__magic_name__: List[Any] = max_position_embeddings
__magic_name__: List[str] = type_vocab_size
__magic_name__: int = type_sequence_label_size
__magic_name__: Optional[int] = initializer_range
__magic_name__: Optional[int] = num_labels
__magic_name__: str = num_choices
__magic_name__: Optional[Any] = scope
__magic_name__: Union[str, Any] = self.vocab_size - 1
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
__magic_name__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__: Dict = None
if self.use_input_mask:
__magic_name__: Any = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__: List[Any] = None
if self.use_token_type_ids:
__magic_name__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__: Dict = None
if self.use_mc_token_ids:
__magic_name__: Any = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__magic_name__: List[Any] = None
__magic_name__: Any = None
__magic_name__: List[Any] = None
if self.use_labels:
__magic_name__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__: Dict = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__: Tuple = self.get_config()
__magic_name__: Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCamelCase__ ( self : Any , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] , *__snake_case : Dict ) -> Any:
__magic_name__: Any = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
model(__snake_case , token_type_ids=__snake_case )
__magic_name__: List[Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : str , __snake_case : int , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Dict , *__snake_case : Any ) -> Union[str, Any]:
__magic_name__: List[Any] = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
def lowerCamelCase__ ( self : List[str] , __snake_case : List[Any] , __snake_case : str , __snake_case : str , __snake_case : Optional[int] , *__snake_case : Union[str, Any] ) -> str:
__magic_name__: int = self.num_labels
__magic_name__: int = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: int = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] ) -> Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__: str = CTRLModelTester(self )
__magic_name__: Optional[int] = ConfigTester(self , config_class=__snake_case , n_embd=3_7 )
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
__magic_name__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
pass
@slow
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: Union[str, Any] = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
pass
@require_torch
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
__magic_name__: Optional[Any] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(__snake_case )
__magic_name__: List[str] = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=__snake_case ) # Legal the president is
__magic_name__: List[Any] = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__magic_name__: Optional[int] = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() , __snake_case )
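# Condensed usage sketch mirroring the integration test above (editor addition;
# the public "ctrl" checkpoint is several GB, and generation here is greedy):
#
#   from transformers import CTRLLMHeadModel, CTRLTokenizer
#
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   model = CTRLLMHeadModel.from_pretrained("ctrl")
#   inputs = tokenizer("Legal the president is", return_tensors="pt")
#   output_ids = model.generate(**inputs, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))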
| 96 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
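# Illustrative consumer of the fixtures above (editor addition; hypothetical test --
# `datasets.load_dataset` accepts a path to a local loading script):
#
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       import datasets
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#       assert builder.info is not None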
    return str(script_path)
| 16 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
| 718 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
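# Example run (editor addition; the parameter values are arbitrary choices): a
# 100-cell loop with a car every 5 cells at speed 2, 30% random braking, 20 steps.
if __name__ == "__main__":
    demo_highway = construct_highway(number_of_cells=100, frequency=5, initial_speed=2)
    evolution = simulate(demo_highway, number_of_update=20, probability=0.3, max_speed=5)
    print(len(evolution))  # 21 rows: the initial highway plus one per update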
| 428 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase ):
_lowercase : Optional[Any] = data
def __iter__( self ):
for element in self.data:
yield element
def __magic_name__ ( SCREAMING_SNAKE_CASE=True ) -> List[Any]:
_lowercase : List[str] = Accelerator(even_batches=SCREAMING_SNAKE_CASE )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> Union[str, Any]:
if iterable:
_lowercase : Optional[int] = DummyIterableDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE ) ) )
else:
_lowercase : Union[str, Any] = TensorDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE ) ) )
_lowercase : Any = DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = accelerator.prepare(SCREAMING_SNAKE_CASE )
return dl
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[int]:
_lowercase : Union[str, Any] = create_dataloader(accelerator=SCREAMING_SNAKE_CASE , dataset_size=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
_lowercase : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def __magic_name__ ( ) -> Any:
_lowercase : Dict = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def __magic_name__ ( ) -> List[str]:
_lowercase : Dict = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def __magic_name__ ( ) -> int:
_lowercase : Optional[Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = torch.nn.Linear(1 , 1 )
_lowercase : Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
_lowercase : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = ddp_model(batch[0].float() )
_lowercase : List[Any] = output.sum()
loss.backward()
batch_idxs.append(SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE )
assert "only supported for multi-GPU" in str(w[-1].message )
def __magic_name__ ( ) -> Optional[int]:
_lowercase : Union[str, Any] = True
_lowercase : Any = False
_lowercase : Tuple = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
_lowercase : List[str] = torch.nn.Linear(1 , 1 )
_lowercase : Optional[int] = accelerator.prepare(SCREAMING_SNAKE_CASE )
_lowercase : Any = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
_lowercase : str = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE ):
_lowercase : str = train_dl.batch_sampler.even_batches
_lowercase : Tuple = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def __magic_name__ ( ) -> Tuple:
_lowercase : Any = True
_lowercase : Union[str, Any] = False
_lowercase : Tuple = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = torch.nn.Linear(1 , 1 )
_lowercase : str = accelerator.prepare(SCREAMING_SNAKE_CASE )
create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE )
_lowercase : Dict = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE ):
_lowercase : Dict = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def __magic_name__ ( ) -> Tuple:
_lowercase : Optional[Any] = create_accelerator()
_lowercase : str = torch.nn.Linear(1 , 1 )
_lowercase : List[str] = accelerator.prepare(SCREAMING_SNAKE_CASE )
create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE )
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE )
assert "only supported for map-style datasets" in str(w[-1].message )
def __magic_name__ ( ) -> List[str]:
_lowercase : List[str] = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
_lowercase : Tuple = accelerator.state.distributed_type
_lowercase : List[str] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = original_state
if __name__ == "__main__":
main()
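# Editor note: this script asserts exactly two processes, so it is typically run as
# (the script filename and flag values are assumptions consistent with the
# two-GPU check in `create_accelerator` above):
#
#   accelerate launch --num_processes 2 test_even_batches.py
#
# The core pattern the tests exercise, in isolation:
#
#   accelerator = Accelerator(even_batches=False)
#   with accelerator.join_uneven_inputs([ddp_model], even_batches=True):
#       for batch in dataloader:
#           ...  # training step that tolerates ragged final batches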
| 66 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_activation('''gelu''')
self.assertTrue(torch.allclose(gelu_python(lowercase_) , torch_builtin(lowercase_)))
self.assertFalse(torch.allclose(gelu_python(lowercase_) , gelu_new(lowercase_)))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
SCREAMING_SNAKE_CASE_ : Dict = get_activation('''gelu''')
SCREAMING_SNAKE_CASE_ : Tuple = get_activation('''gelu_10''')
SCREAMING_SNAKE_CASE_ : Any = torch_builtin(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = geluaa(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(lowercase_).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
get_activation('''gelu''')
get_activation('''gelu_10''')
get_activation('''gelu_fast''')
get_activation('''gelu_new''')
get_activation('''gelu_python''')
get_activation('''gelu_pytorch_tanh''')
get_activation('''linear''')
get_activation('''mish''')
get_activation('''quick_gelu''')
get_activation('''relu''')
get_activation('''sigmoid''')
get_activation('''silu''')
get_activation('''swish''')
get_activation('''tanh''')
with self.assertRaises(lowercase_):
get_activation('''bogus''')
with self.assertRaises(lowercase_):
get_activation(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = get_activation('''gelu''')
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : List[Any] = get_activation('''gelu''')
self.assertEqual(acta.a , 1)
with self.assertRaises(lowercase_):
SCREAMING_SNAKE_CASE_ : Dict = acta.a
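# Standalone usage sketch for the registry exercised above (editor addition): the
# string-to-module mapping lets model configs pick activations declaratively.
#
#   import torch
#   from transformers.activations import get_activation
#
#   act = get_activation("gelu_10")  # GELU with outputs clipped to a maximum of 10.0
#   x = torch.linspace(-3.0, 3.0, steps=7)
#   print(act(x))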
| 512 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = IFPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"}
def __a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def __a ( self : List[Any] , _lowercase : List[str] , _lowercase : Optional[Any]=0 ):
"""simple docstring"""
if str(_lowercase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(_lowercase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
SCREAMING_SNAKE_CASE__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __a ( self : str ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __a ( self : Dict ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self : Dict ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self : Any ):
"""simple docstring"""
self._test_save_load_local()
def __a ( self : List[Any] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_lowercase , tokenizer=_lowercase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_lowercase , _lowercase , _lowercase , _lowercase )
def __a ( self : Union[str, Any] , _lowercase : Any , _lowercase : List[str] , _lowercase : Dict , _lowercase : int ):
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def __a ( self : Dict , _lowercase : Any , _lowercase : str , _lowercase : List[Any] , _lowercase : Optional[int] ):
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def __a ( self : Any , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
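# Condensed stage-I usage sketch distilled from the integration test above (editor
# addition; running it needs a CUDA GPU and acceptance of the DeepFloyd license):
#
#   import torch
#   from diffusers import IFPipeline
#
#   pipe = IFPipeline.from_pretrained(
#       "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
#   )
#   pipe.enable_model_cpu_offload()
#   prompt_embeds, negative_embeds = pipe.encode_prompt("anime turtle")
#   image = pipe(
#       prompt_embeds=prompt_embeds,
#       negative_prompt_embeds=negative_embeds,
#       output_type="pil",
#   ).images[0]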
| 713 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
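    # Editor addition: hard predictions at the standard 0.5 threshold, plus the
    # resulting training accuracy on this two-feature iris subset.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (predictions == y).mean())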
| 379 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline(Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        """simple docstring"""
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image: Union["Image.Image", str] , question: str = None , **kwargs ):
        """simple docstring"""
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'''image''': image, '''question''': question}
        else:
            # Assume the iterable/dataset case: the caller passed fully formed inputs.
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        """simple docstring"""
        image = load_image(inputs['''image'''] )
        model_inputs = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 332 |
def sum_digits( num : int ) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution( max_n : int = 100 ) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 332 | 1 |
from collections.abc import Sequence
def max_subsequence_sum( nums : Sequence[int] | None = None ) -> int:
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 231 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput ):
    '''simple docstring'''
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ):
        """simple docstring"""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        """simple docstring"""
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
| 231 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase : List[str] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 302 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase : int = get_tests_dir("""fixtures""")
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[str] = mock.Mock()
a__ : Any = 500
a__ : List[Any] = {}
a__ : List[str] = HTTPError
a__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
a__ : Dict = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowercase) as mock_head:
a__ : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')
def __lowercase ( self) -> int:
'''simple docstring'''
with self.assertRaises(lowercase):
# config is in subfolder, the following should not work without specifying the subfolder
a__ : Optional[int] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
a__ : int = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor')
self.assertIsNotNone(lowercase)
@is_staging_test
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowercase)
@classmethod
def __lowercase ( cls) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-image-processor')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor')
except HTTPError:
pass
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = ViTImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token)
a__ : Dict = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase , repo_id='test-image-processor' , push_to_hub=lowercase , use_auth_token=self._token)
a__ : List[str] = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = ViTImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token)
a__ : Any = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase , repo_id='valid_org/test-image-processor-org' , push_to_hub=lowercase , use_auth_token=self._token)
a__ : int = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
def __lowercase ( self) -> List[str]:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
a__ : List[Any] = CustomImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
a__ : str = AutoImageProcessor.from_pretrained(
F'{USER}/test-dynamic-image-processor' , trust_remote_code=lowercase)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor')
| 302 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 701 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[Any] ) -> Dict:
"""simple docstring"""
__A = 3_8_4
__A = 7
if "tiny" in model_name:
__A = 9_6
__A = (2, 2, 6, 2)
__A = (3, 6, 1_2, 2_4)
elif "small" in model_name:
__A = 9_6
__A = (2, 2, 1_8, 2)
__A = (3, 6, 1_2, 2_4)
elif "base" in model_name:
__A = 1_2_8
__A = (2, 2, 1_8, 2)
__A = (4, 8, 1_6, 3_2)
__A = 1_2
__A = 5_1_2
elif "large" in model_name:
__A = 1_9_2
__A = (2, 2, 1_8, 2)
__A = (6, 1_2, 2_4, 4_8)
__A = 1_2
__A = 7_6_8
# set label information
__A = 1_5_0
__A = """huggingface/label-files"""
__A = """ade20k-id2label.json"""
__A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="""dataset""" ) , """r""" ) )
__A = {int(__lowercase ): v for k, v in idalabel.items()}
__A = {v: k for k, v in idalabel.items()}
__A = SwinConfig(
embed_dim=__lowercase , depths=__lowercase , num_heads=__lowercase , window_size=__lowercase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
__A = UperNetConfig(
backbone_config=__lowercase , auxiliary_in_channels=__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase , )
return config
def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[Any] ) -> Dict:
"""simple docstring"""
__A = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Any:
"""simple docstring"""
__A = dct.pop(__lowercase )
__A = val
def _SCREAMING_SNAKE_CASE ( __lowercase : List[str] , __lowercase : int ) -> Any:
"""simple docstring"""
__A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__A = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__A = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
__A = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__A = in_proj_weight[:dim, :]
__A = in_proj_bias[: dim]
__A = in_proj_weight[
dim : dim * 2, :
]
__A = in_proj_bias[
dim : dim * 2
]
__A = in_proj_weight[
-dim :, :
]
__A = in_proj_bias[-dim :]
# fmt: on
def correct_unfold_reduction_order( x ) -> torch.Tensor:
    """simple docstring"""
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ) -> torch.Tensor:
    """simple docstring"""
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ) -> torch.Tensor:
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ) -> torch.Tensor:
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
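# Hedged round-trip check (my addition, not in the original script): the
# "reverse_*" helper undoes the corresponding "correct_*" helper, since the
# row/column permutation [0, 2, 1, 3] is its own inverse.
_demo = torch.arange(24, dtype=torch.float32).reshape(2, 12)
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(_demo)), _demo)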
def _SCREAMING_SNAKE_CASE ( __lowercase : Dict , __lowercase : List[Any] , __lowercase : str ) -> Tuple:
"""simple docstring"""
__A = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
__A = model_name_to_url[model_name]
__A = torch.hub.load_state_dict_from_url(__lowercase , map_location="""cpu""" , file_name=__lowercase )[
"""state_dict"""
]
for name, param in state_dict.items():
print(__lowercase , param.shape )
__A = get_upernet_config(__lowercase )
__A = UperNetForSemanticSegmentation(__lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__A = state_dict.pop(__lowercase )
if "bn" in key:
__A = key.replace("""bn""" , """batch_norm""" )
__A = val
# rename keys
__A = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__A = reverse_correct_unfold_reduction_order(__lowercase )
if "norm" in key:
__A = reverse_correct_unfold_norm_order(__lowercase )
model.load_state_dict(__lowercase )
# verify on image
__A = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
__A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert("""RGB""" )
__A = SegformerImageProcessor()
__A = processor(__lowercase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
__A = model(__lowercase )
__A = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__A = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
__A = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
__A = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
__A = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowercase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
__a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a : List[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 199 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    '''simple docstring'''
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            # left padding: keep the values right-aligned
            if isinstance(padding_value , tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
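# Illustrative check (my addition, assuming the reconstruction above):
# right-pad two label sequences to length 4 with -1.
assert padding_tensor([[7, 8], [9]], -1, "right", 4) == [[7, 8, -1, -1], [9, -1, -1, -1]]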
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = ord(__UpperCamelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(__UpperCamelCase )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -1_0_0
    return_tensors: str = "pt"
    def torch_call( self , features ):
        '''simple docstring'''
        import torch
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" if labels is None else None ,)
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["""ner_tags"""] = padding_tensor(ner_tags ,-1 ,padding_side ,sequence_length )
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["""original_entity_spans"""] = padding_tensor(original_entity_spans ,(-1, -1) ,padding_side ,sequence_length )
        batch = {k: torch.tensor(v ,dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 65 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 2_9_9_7_9_2_4_5_8
# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta( velocity: float )-> float:
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!' )
    return velocity / c
def gamma( velocity: float )-> float:
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity: float )-> np.ndarray:
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform( velocity: float , event: np.ndarray = None )-> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
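# Hedged numeric sketch (my addition): the time-dilation factor at 99% of
# light speed is 1 / sqrt(1 - 0.99**2), roughly 7.089.
assert abs(gamma(0.99 * c) - 7.0888) < 1e-3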
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(2_9_9_7_9_2_4_5)
print('''Example of four vector: ''')
print(f"""ct' = {four_vector[0]}""")
print(f"""x' = {four_vector[1]}""")
print(f"""y' = {four_vector[2]}""")
print(f"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"""\n{numerical_vector}""") | 360 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = KandinskyVaaImgaImgPipeline
UpperCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCamelCase__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCamelCase__ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase__ = False
@property
def lowerCAmelCase_ ( self : Dict ):
return 32
@property
def lowerCAmelCase_ ( self : str ):
return 32
@property
def lowerCAmelCase_ ( self : Dict ):
return self.time_input_dim
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1_00
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
a__ = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
a__ = UNetaDConditionModel(**snake_case__ )
return model
@property
def lowerCAmelCase_ ( self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self : Any ):
torch.manual_seed(0 )
a__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self : Any ):
a__ = self.dummy_unet
a__ = self.dummy_movq
a__ = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.0_0085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
a__ = DDIMScheduler(**snake_case__ )
a__ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase_ ( self : Optional[int] ,a__ : Union[str, Any] ,a__ : Union[str, Any]=0 ):
a__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(snake_case__ ) ).to(snake_case__ )
a__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(snake_case__ ) ).to(snake_case__ )
a__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
a__ = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((2_56, 2_56) )
if str(snake_case__ ).startswith("mps" ):
a__ = torch.manual_seed(snake_case__ )
else:
a__ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
a__ = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase_ ( self : Any ):
a__ = "cpu"
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**snake_case__ )
a__ = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
a__ = pipe(**self.get_dummy_inputs(snake_case__ ) )
a__ = output.images
a__ = pipe(
**self.get_dummy_inputs(snake_case__ ) ,return_dict=snake_case__ ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : str ):
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
a__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
a__ = "A red cartoon frog, 4k"
a__ = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" ,torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
a__ = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" ,torch_dtype=torch.floataa )
a__ = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = pipe_prior(
snake_case__ ,generator=snake_case__ ,num_inference_steps=5 ,negative_prompt="" ,).to_tuple()
a__ = pipeline(
image=snake_case__ ,image_embeds=snake_case__ ,negative_image_embeds=snake_case__ ,generator=snake_case__ ,num_inference_steps=1_00 ,height=7_68 ,width=7_68 ,strength=0.2 ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case__ ,snake_case__ )
| 719 |
'''simple docstring'''
def gray_code (bit_count: int ) -> list:
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string (bit_count: int ) -> list:
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
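# Quick illustration (my addition): the reflected Gray code for 3 bits;
# consecutive entries differ in exactly one bit.
assert gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]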
if __name__ == "__main__":
import doctest
doctest.testmod()
| 394 | 0 |
"""simple docstring"""
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 273 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner( self , weights , sample ) -> int:
        """simple docstring"""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights , sample , j , alpha ) -> list[list[int | float]]:
        """simple docstring"""
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def __magic_name__ ( ) -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f'Clusters that the test sample belongs to : {winner}' )
    print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 273 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "T5Config"
def shift_tokens_right ( input_ids : jnp.array , pad_token_id : int , decoder_start_token_id : int ):
    """simple docstring"""
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
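# Hedged usage sketch (my addition): shift decoder inputs one position to the
# right, inserting the start token and mapping -100 (ignored label positions)
# back to the pad id. With pad_token_id=0 and decoder_start_token_id=0:
assert shift_tokens_right(jnp.array([[5, 6, -100]]), 0, 0).tolist() == [[0, 5, 6]]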
class FlaxMTaModel(FlaxTaModel ):
    UpperCAmelCase_ = """mt5"""
    UpperCAmelCase_ = MTaConfig
class FlaxMTaEncoderModel(FlaxTaEncoderModel ):
    UpperCAmelCase_ = """mt5"""
    UpperCAmelCase_ = MTaConfig
class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration ):
    UpperCAmelCase_ = """mt5"""
    UpperCAmelCase_ = MTaConfig
| 364 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : int = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 364 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 |
'''simple docstring'''
def solution( length = 5_0 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
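# Hedged check (my addition): Project Euler 114 states that a row of length 7
# admits exactly seventeen fillings with blocks of minimum length 3 separated
# by at least one empty cell (including the empty filling).
assert solution(7) == 17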
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 186 |
import math
def proth( number : int ) -> int:
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
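# Hedged illustration (my addition): the first few Proth numbers, each of the
# form k * 2**n + 1 with odd k and k < 2**n.
assert [proth(n) for n in range(1, 7)] == [3, 5, 9, 13, 17, 25]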
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
| 186 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A__: Tuple = '''src/diffusers'''
A__: Union[str, Any] = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
A__: Tuple = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
A__: int = spec.loader.load_module()
def lowerCAmelCase_ ( A_ ,A_):
return line.startswith(A_) or len(A_) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" ,A_) is not None
def lowerCAmelCase_ ( A_):
UpperCamelCase__: Optional[int] = object_name.split(".")
UpperCamelCase__: Optional[int] = 0
# First let's find the module where our object lives.
UpperCamelCase__: Union[str, Any] = parts[i]
while i < len(A_) and not os.path.isfile(os.path.join(A_ ,F"{module}.py")):
i += 1
if i < len(A_):
UpperCamelCase__: Tuple = os.path.join(A_ ,parts[i])
if i >= len(A_):
raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
with open(os.path.join(A_ ,F"{module}.py") ,"r" ,encoding="utf-8" ,newline="\n") as f:
UpperCamelCase__: Union[str, Any] = f.readlines()
# Now let's find the class / func in the code!
UpperCamelCase__: Optional[Any] = ""
UpperCamelCase__: str = 0
for name in parts[i + 1 :]:
while (
line_index < len(A_) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)" ,lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(A_):
raise ValueError(F" {object_name} does not match any function or class in {module}.")
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCamelCase__: str = line_index
while line_index < len(A_) and _should_continue(lines[line_index] ,A_):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
UpperCamelCase__: Union[str, Any] = lines[start_index:line_index]
return "".join(A_)
A__: Optional[int] = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
A__: int = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
A__: List[Any] = re.compile(R'''<FILL\s+[^>]*>''')
def lowerCAmelCase_ ( A_):
UpperCamelCase__: List[Any] = code.split("\n")
UpperCamelCase__: Optional[Any] = 0
while idx < len(A_) and len(lines[idx]) == 0:
idx += 1
if idx < len(A_):
return re.search(R"^(\s*)\S" ,lines[idx]).groups()[0]
return ""
def lowerCAmelCase_ ( A_):
UpperCamelCase__: List[Any] = len(get_indent(A_)) > 0
if has_indent:
UpperCamelCase__: Dict = F"class Bla:\n{code}"
UpperCamelCase__: Dict = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_19 ,preview=A_)
UpperCamelCase__: int = black.format_str(A_ ,mode=A_)
UpperCamelCase__ , UpperCamelCase__: Any = style_docstrings_in_code(A_)
return result[len("class Bla:\n") :] if has_indent else result
def lowerCAmelCase_ ( A_ ,A_=False):
with open(A_ ,"r" ,encoding="utf-8" ,newline="\n") as f:
UpperCamelCase__: Tuple = f.readlines()
UpperCamelCase__: Any = []
UpperCamelCase__: int = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(A_):
UpperCamelCase__: Dict = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: List[Any] = search.groups()
UpperCamelCase__: Union[str, Any] = find_code_in_diffusers(A_)
UpperCamelCase__: List[str] = get_indent(A_)
UpperCamelCase__: Optional[int] = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCamelCase__: Optional[Any] = theoretical_indent
UpperCamelCase__: Dict = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCamelCase__: Union[str, Any] = True
while line_index < len(A_) and should_continue:
line_index += 1
if line_index >= len(A_):
break
UpperCamelCase__: Any = lines[line_index]
UpperCamelCase__: Dict = _should_continue(A_ ,A_) and re.search(F"^{indent}# End copy" ,A_) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
UpperCamelCase__: int = lines[start_index:line_index]
UpperCamelCase__: int = "".join(A_)
# Remove any nested `Copied from` comments to avoid circular copies
UpperCamelCase__: Dict = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(A_) is None]
UpperCamelCase__: Union[str, Any] = "\n".join(A_)
# Before comparing, use the `replace_pattern` on the original code.
if len(A_) > 0:
UpperCamelCase__: Optional[Any] = replace_pattern.replace("with" ,"").split(",")
UpperCamelCase__: Any = [_re_replace_pattern.search(A_) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: Tuple = pattern.groups()
UpperCamelCase__: Tuple = re.sub(A_ ,A_ ,A_)
if option.strip() == "all-casing":
UpperCamelCase__: List[str] = re.sub(obja.lower() ,obja.lower() ,A_)
UpperCamelCase__: List[str] = re.sub(obja.upper() ,obja.upper() ,A_)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCamelCase__: Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code)
UpperCamelCase__: Optional[Any] = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
UpperCamelCase__: Dict = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCamelCase__: Tuple = start_index + 1
if overwrite and len(A_) > 0:
# Warn the user a file has been modified.
print(F"Detected changes, rewriting {filename}.")
with open(A_ ,"w" ,encoding="utf-8" ,newline="\n") as f:
f.writelines(A_)
return diffs
def lowerCAmelCase_ ( A_ = False):
UpperCamelCase__: Any = glob.glob(os.path.join(A_ ,"**/*.py") ,recursive=A_)
UpperCamelCase__: str = []
for filename in all_files:
UpperCamelCase__: Optional[int] = is_copy_consistent(A_ ,A_)
diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(A_) > 0:
UpperCamelCase__: Tuple = "\n".join(A_)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
A__: Dict = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A__: Optional[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 380 |
def kth_permutation( k , n ):
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2 , n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
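# Worked illustration (my addition): with n = 3 the permutations in
# lexicographic order are indexed 0..5, so k = 0 is the first and k = 5 the last.
assert kth_permutation(0, 3) == [0, 1, 2]
assert kth_permutation(5, 3) == [2, 1, 0]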
if __name__ == "__main__":
import doctest
doctest.testmod()
| 380 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        '''simple docstring'''
        with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
        '''simple docstring'''
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.999 , SCREAMING_SNAKE_CASE_ = 1e-8 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> str:
lowerCAmelCase__ : Dict = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=lowercase__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=lowercase__ , )
if num_warmup_steps:
lowerCAmelCase__ : List[Any] = WarmUp(
initial_learning_rate=lowercase__ , decay_schedule_fn=lowercase__ , warmup_steps=lowercase__ , )
if weight_decay_rate > 0.0:
lowerCAmelCase__ : Optional[Any] = AdamWeightDecay(
learning_rate=lowercase__ , weight_decay_rate=lowercase__ , beta_a=lowercase__ , beta_a=lowercase__ , epsilon=lowercase__ , clipnorm=lowercase__ , global_clipnorm=lowercase__ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=lowercase__ , )
else:
lowerCAmelCase__ : int = tf.keras.optimizers.Adam(
learning_rate=lowercase__ , beta_a=lowercase__ , beta_a=lowercase__ , epsilon=lowercase__ , clipnorm=lowercase__ , global_clipnorm=lowercase__ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
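# A self-contained sketch (plain Python, illustrative names, assuming
# min_lr_ratio = 0) of the schedule the factory above wires together:
# linear warmup to init_lr, then polynomial decay over the remaining steps.
def warmup_then_decay_lr(step, init_lr=5e-5, warmup_steps=1_000, total_steps=10_000, power=1.0):
    if step < warmup_steps:
        return init_lr * step / warmup_steps                      # linear warmup
    remaining = (total_steps - step) / max(total_steps - warmup_steps, 1)
    return init_lr * max(remaining, 0.0) ** power                 # decay toward 0

# warmup_then_decay_lr(500) == 2.5e-05; warmup_then_decay_lr(10_000) == 0.0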
class A__ ( __snake_case ):
def __init__( self : int , a : List[Any] = 0.0_0_1 , a : Union[str, Any] = 0.9 , a : Optional[Any] = 0.9_9_9 , a : Union[str, Any] = 1E-7 , a : List[str] = False , a : Tuple = 0.0 , a : Optional[int] = None , a : Tuple = None , a : Any = "AdamWeightDecay" , **a : Optional[int] , ):
'''simple docstring'''
super().__init__(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
lowerCAmelCase__ : Optional[int] = weight_decay_rate
lowerCAmelCase__ : Tuple = include_in_weight_decay
lowerCAmelCase__ : int = exclude_from_weight_decay
@classmethod
def _lowerCamelCase ( cls : Any , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = {'WarmUp': WarmUp}
return super(__UpperCamelCase , cls ).from_config(__UpperCamelCase , custom_objects=__UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Optional[Any] , a : Union[str, Any] ):
'''simple docstring'''
super(__UpperCamelCase , self )._prepare_local(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCAmelCase__ : List[Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def _lowerCamelCase ( self : Dict , a : int , a : str , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def _lowerCamelCase ( self : Optional[int] , a : Any , a : int=None , **a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = list(zip(*__UpperCamelCase ) )
return super(__UpperCamelCase , self ).apply_gradients(zip(__UpperCamelCase , __UpperCamelCase ) , name=__UpperCamelCase , **__UpperCamelCase )
def _lowerCamelCase ( self : Optional[Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCAmelCase__ : List[Any] = apply_state or {}
lowerCAmelCase__ : int = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCAmelCase__ : Any = self._fallback_apply_state(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase__ : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _lowerCamelCase ( self : Tuple , a : Dict , a : Tuple , a : Any=None ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_lr(var.device , var.dtype.base_dtype , __UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self._decay_weights_op(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
with tf.control_dependencies([decay] ):
return super(__UpperCamelCase , self )._resource_apply_dense(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def _lowerCamelCase ( self : Any , a : List[str] , a : Union[str, Any] , a : Optional[Any] , a : Dict=None ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_lr(var.device , var.dtype.base_dtype , __UpperCamelCase )
lowerCAmelCase__ : str = self._decay_weights_op(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
with tf.control_dependencies([decay] ):
return super(__UpperCamelCase , self )._resource_apply_sparse(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def _lowerCamelCase ( self : Optional[Any] , a : Tuple ):
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__UpperCamelCase , __UpperCamelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__UpperCamelCase , __UpperCamelCase ) is not None:
return False
return True
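# Self-contained sketch of the include/exclude rule implemented above: an
# include pattern wins, otherwise any exclude pattern blocks decay,
# otherwise the parameter decays. Names and defaults are illustrative.
import re as _re
def _decays(name, include=(), exclude=("LayerNorm", "layer_norm", "bias")):
    if any(_re.search(r, name) for r in include):
        return True
    return not any(_re.search(r, name) for r in exclude)
assert _decays("dense/kernel") and not _decays("dense/bias")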
class A__ ( __snake_case ):
def __init__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Optional[Any] = None
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if self._accum_steps is None:
lowerCAmelCase__ : List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__UpperCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[str] , a : Any ):
'''simple docstring'''
if not self._gradients:
lowerCAmelCase__ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__UpperCamelCase ) , trainable=__UpperCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__UpperCamelCase ) != len(self._gradients ):
raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(__UpperCamelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , __UpperCamelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__UpperCamelCase )
self._accum_steps.assign_add(1 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__UpperCamelCase ) )
| 702 |
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCAmelCase__ : Stack[int] = Stack()
lowerCAmelCase__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE_ )
elif i == ")":
# RULE 4
lowerCAmelCase__ : List[Any] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase__ : List[str] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : List[Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
operand_stack.push(SCREAMING_SNAKE_CASE_ )
# RULE 5
return operand_stack.peek()
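# Worked trace of the rules above on "(3 + (4 * 2))" (single-digit
# operands, as in the demo below): digits push onto the operand stack,
# operators onto the operator stack, and each ')' pops one operator and
# two operands, pushing the partial result back:
#   read 3, +, 4, *, 2 -> operands [3, 4, 2], operators [+, *]
#   read ')'           -> pop * with 4, 2 -> push 8  -> operands [3, 8]
#   read ')'           -> pop + with 3, 8 -> push 11 -> operands [11]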
if __name__ == "__main__":
lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""") | 69 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__ ( UpperCamelCase):
a_ = 42
class snake_case__ ( nn.Module):
def __init__( self : str , _A : int=3 , _A : List[str]=3 , _A : Tuple=("DownEncoderBlock2D",) , _A : Optional[int]=(64,) , _A : Tuple=2 , _A : Union[str, Any]=32 , _A : Any="silu" , _A : Optional[Any]=True , ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : Union[str, Any] = layers_per_block
UpperCAmelCase_ : int = torch.nn.Convad(
_A , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[int] = nn.ModuleList([] )
# down
UpperCAmelCase_ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(_A ):
UpperCAmelCase_ : str = output_channel
UpperCAmelCase_ : Optional[Any] = block_out_channels[i]
UpperCAmelCase_ : Optional[int] = i == len(_A ) - 1
UpperCAmelCase_ : List[Any] = get_down_block(
_A , num_layers=self.layers_per_block , in_channels=_A , out_channels=_A , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_A , resnet_groups=_A , attention_head_dim=_A , temb_channels=_A , )
self.down_blocks.append(_A )
# mid
UpperCAmelCase_ : int = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_A , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_A , temb_channels=_A , )
# out
UpperCAmelCase_ : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_A , eps=1e-6 )
UpperCAmelCase_ : int = nn.SiLU()
UpperCAmelCase_ : List[str] = 2 * out_channels if double_z else out_channels
UpperCAmelCase_ : str = nn.Convad(block_out_channels[-1] , _A , 3 , padding=1 )
UpperCAmelCase_ : List[Any] = False
def A ( self : str , _A : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = x
UpperCAmelCase_ : Optional[Any] = self.conv_in(_A )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_A : str ):
def custom_forward(*_A : Optional[int] ):
return module(*_A )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
UpperCAmelCase_ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(_A ) , _A , use_reentrant=_A )
# middle
UpperCAmelCase_ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _A , use_reentrant=_A )
else:
for down_block in self.down_blocks:
UpperCAmelCase_ : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(_A ) , _A )
# middle
UpperCAmelCase_ : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _A )
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase_ : Union[str, Any] = down_block(_A )
# middle
UpperCAmelCase_ : Optional[Any] = self.mid_block(_A )
# post-process
UpperCAmelCase_ : Optional[int] = self.conv_norm_out(_A )
UpperCAmelCase_ : Optional[Any] = self.conv_act(_A )
UpperCAmelCase_ : int = self.conv_out(_A )
return sample
class snake_case__ ( nn.Module):
def __init__( self : List[Any] , _A : Dict=3 , _A : int=3 , _A : Optional[int]=("UpDecoderBlock2D",) , _A : str=(64,) , _A : List[str]=2 , _A : Optional[int]=32 , _A : str="silu" , _A : Union[str, Any]="group" , ) -> Any:
super().__init__()
UpperCAmelCase_ : Optional[Any] = layers_per_block
UpperCAmelCase_ : List[str] = nn.Convad(
_A , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Optional[int] = nn.ModuleList([] )
UpperCAmelCase_ : int = in_channels if norm_type == '''spatial''' else None
# mid
UpperCAmelCase_ : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_A , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_A , temb_channels=_A , )
# up
UpperCAmelCase_ : List[Any] = list(reversed(_A ) )
UpperCAmelCase_ : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_A ):
UpperCAmelCase_ : Optional[Any] = output_channel
UpperCAmelCase_ : Optional[int] = reversed_block_out_channels[i]
UpperCAmelCase_ : Union[str, Any] = i == len(_A ) - 1
UpperCAmelCase_ : Any = get_up_block(
_A , num_layers=self.layers_per_block + 1 , in_channels=_A , out_channels=_A , prev_output_channel=_A , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_A , resnet_groups=_A , attention_head_dim=_A , temb_channels=_A , resnet_time_scale_shift=_A , )
self.up_blocks.append(_A )
UpperCAmelCase_ : Union[str, Any] = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase_ : str = SpatialNorm(block_out_channels[0] , _A )
else:
UpperCAmelCase_ : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_A , eps=1e-6 )
UpperCAmelCase_ : str = nn.SiLU()
UpperCAmelCase_ : Any = nn.Convad(block_out_channels[0] , _A , 3 , padding=1 )
UpperCAmelCase_ : Any = False
def A ( self : str , _A : str , _A : Tuple=None ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = z
UpperCAmelCase_ : Optional[int] = self.conv_in(_A )
UpperCAmelCase_ : int = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_A : Dict ):
def custom_forward(*_A : Tuple ):
return module(*_A )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
UpperCAmelCase_ : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _A , _A , use_reentrant=_A )
UpperCAmelCase_ : Union[str, Any] = sample.to(_A )
# up
for up_block in self.up_blocks:
UpperCAmelCase_ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_A ) , _A , _A , use_reentrant=_A )
else:
# middle
UpperCAmelCase_ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _A , _A )
UpperCAmelCase_ : Optional[Any] = sample.to(_A )
# up
for up_block in self.up_blocks:
UpperCAmelCase_ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(_A ) , _A , _A )
else:
# middle
UpperCAmelCase_ : List[str] = self.mid_block(_A , _A )
UpperCAmelCase_ : List[Any] = sample.to(_A )
# up
for up_block in self.up_blocks:
UpperCAmelCase_ : str = up_block(_A , _A )
# post-process
if latent_embeds is None:
UpperCAmelCase_ : int = self.conv_norm_out(_A )
else:
UpperCAmelCase_ : List[Any] = self.conv_norm_out(_A , _A )
UpperCAmelCase_ : Any = self.conv_act(_A )
UpperCAmelCase_ : str = self.conv_out(_A )
return sample
class snake_case__ ( nn.Module):
def __init__( self : Any , _A : Tuple , _A : str , _A : Optional[Any] , _A : Tuple=None , _A : List[Any]="random" , _A : str=False , _A : Optional[Any]=True ) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ : int = n_e
UpperCAmelCase_ : Tuple = vq_embed_dim
UpperCAmelCase_ : int = beta
UpperCAmelCase_ : str = legacy
UpperCAmelCase_ : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCAmelCase_ : Tuple = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
UpperCAmelCase_ : Optional[int] = self.used.shape[0]
UpperCAmelCase_ : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase_ : int = self.re_embed
UpperCAmelCase_ : Optional[Any] = self.re_embed + 1
print(
F"Remapping {self.n_e} indices to {self.re_embed} indices. "
F"Using {self.unknown_index} for unknown indices." )
else:
UpperCAmelCase_ : List[str] = n_e
UpperCAmelCase_ : Optional[int] = sane_index_shape
def A ( self : Optional[Any] , _A : Any ) -> Optional[Any]:
UpperCAmelCase_ : int = inds.shape
assert len(_A ) > 1
UpperCAmelCase_ : int = inds.reshape(ishape[0] , -1 )
UpperCAmelCase_ : List[Any] = self.used.to(_A )
UpperCAmelCase_ : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase_ : Union[str, Any] = match.argmax(-1 )
UpperCAmelCase_ : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCAmelCase_ : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCAmelCase_ : str = self.unknown_index
return new.reshape(_A )
def A ( self : int , _A : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = inds.shape
assert len(_A ) > 1
UpperCAmelCase_ : Union[str, Any] = inds.reshape(ishape[0] , -1 )
UpperCAmelCase_ : Tuple = self.used.to(_A )
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase_ : Dict = 0 # simply set to zero
UpperCAmelCase_ : Optional[int] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _A )
return back.reshape(_A )
def A ( self : int , _A : Any ) -> Tuple:
# reshape z -> (batch, height, width, channel) and flatten
UpperCAmelCase_ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCAmelCase_ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase_ : List[Any] = torch.argmin(torch.cdist(_A , self.embedding.weight ) , dim=1 )
UpperCAmelCase_ : int = self.embedding(_A ).view(z.shape )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[int] = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase_ : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCAmelCase_ : Dict = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCAmelCase_ : int = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCAmelCase_ : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCAmelCase_ : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCAmelCase_ : Any = self.remap_to_used(_A )
UpperCAmelCase_ : Dict = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCAmelCase_ : Dict = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def A ( self : List[Any] , _A : Tuple , _A : Dict ) -> str:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCAmelCase_ : Tuple = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCAmelCase_ : Tuple = self.unmap_to_all(_A )
UpperCAmelCase_ : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCAmelCase_ : Any = self.embedding(_A )
if shape is not None:
UpperCAmelCase_ : Optional[int] = z_q.view(_A )
# reshape back to match original input shape
UpperCAmelCase_ : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
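# Self-contained sketch of the "preserve gradients" line above (the
# straight-through estimator): adding the detached residual copies the
# quantized value forward while gradients flow to z unchanged.
_z = torch.randn(4, requires_grad=True)
_z_q = torch.round(_z)                    # stand-in for the codebook lookup
_out = _z + (_z_q - _z).detach()
_out.sum().backward()
assert torch.allclose(_out, _z_q) and torch.all(_z.grad == 1.0)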
class snake_case__ ( UpperCamelCase):
def __init__( self : str , _A : Optional[int] , _A : str=False ) -> Optional[Any]:
UpperCAmelCase_ : int = parameters
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = torch.chunk(_A , 2 , dim=1 )
UpperCAmelCase_ : int = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCAmelCase_ : Tuple = deterministic
UpperCAmelCase_ : int = torch.exp(0.5 * self.logvar )
UpperCAmelCase_ : Any = torch.exp(self.logvar )
if self.deterministic:
UpperCAmelCase_ : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def A ( self : Any , _A : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
UpperCAmelCase_ : Union[str, Any] = randn_tensor(
self.mean.shape , generator=_A , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCAmelCase_ : List[str] = self.mean + self.std * sample
return x
def A ( self : Tuple , _A : str=None ) -> Optional[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def A ( self : int , _A : List[Any] , _A : int=[1, 2, 3] ) -> str:
if self.deterministic:
return torch.Tensor([0.0] )
UpperCAmelCase_ : int = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_A )
def A ( self : List[Any] ) -> Any:
return self.mean
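# Minimal sketch of the reparameterization step used by the distribution
# class above (illustrative names): sample z = mean + std * eps with the
# same logvar clamp, so gradients flow through mean and std.
def _reparameterize(mean, logvar):
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    return mean + std * torch.randn_like(mean)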
| 541 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__ ( unittest.TestCase):
def __init__( self : Optional[Any] , _A : int , _A : List[Any]=7 , _A : Tuple=3 , _A : int=18 , _A : Union[str, Any]=30 , _A : Any=4_00 , _A : List[Any]=True , _A : Optional[int]=None , _A : Optional[Any]=True , _A : Union[str, Any]=None , ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = size if size is not None else {'''shortest_edge''': 20}
UpperCAmelCase_ : Dict = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : List[str] = max_resolution
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : Any = size
UpperCAmelCase_ : Union[str, Any] = do_center_crop
UpperCAmelCase_ : Any = crop_size
def A ( self : List[Any] ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = MobileNetVaImageProcessor if is_vision_available() else None
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : Dict = MobileNetVaImageProcessingTester(self )
@property
def A ( self : Optional[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ) -> Dict:
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''crop_size''' ) )
def A ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def A ( self : int ) -> List[str]:
pass
def A ( self : List[Any] ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : str ) -> List[str]:
# Initialize image_processing
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
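# Self-contained numpy sketch of the center-crop step these assertions
# exercise (the shortest-edge resize is omitted); assumes channels-first
# arrays and illustrative names.
def _center_crop(img, height, width):
    _, h, w = img.shape
    top, left = (h - height) // 2, (w - width) // 2
    return img[:, top : top + height, left : left + width]
assert _center_crop(np.zeros((3, 30, 30)), 18, 18).shape == (3, 18, 18)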
| 541 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = get_activation('swish' )
self.assertIsInstance(_A , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = get_activation('silu' )
self.assertIsInstance(_A , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = get_activation('mish' )
self.assertIsInstance(_A , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = get_activation('gelu' )
self.assertIsInstance(_A , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
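# Reference formulas behind the assertions above (a sketch, not the
# diffusers implementations): silu(x) = x * sigmoid(x) and
# mish(x) = x * tanh(softplus(x)), so both are 0 at x = 0, vanish for
# very negative x, and approach the identity for large positive x.
import math
def _silu(x):
    return x / (1.0 + math.exp(-x))
def _mish(x):
    return x * math.tanh(math.log1p(math.exp(x)))
assert _silu(0.0) == 0.0 and round(_silu(20.0), 6) == 20.0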
| 716 |
"""simple docstring"""
import numpy as np
def __lowerCamelCase ( lowerCAmelCase__ ):
return 1 / (1 + np.exp(-vector ))
def __lowerCamelCase ( lowerCAmelCase__ ):
return vector * sigmoid(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
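# Worked check of the two functions above, written self-contained with
# illustrative names: sigmoid(1) ~= 0.7311, so swish(1) ~= 0.7311 and
# swish(0) = 0 exactly.
_v = np.array([-1.0, 0.0, 1.0])
print(np.round(_v * (1 / (1 + np.exp(-_v))), 4))  # [-0.2689  0.      0.7311]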
| 554 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__SCREAMING_SNAKE_CASE : List[str] = text_generator('''This is a test''' , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
snake_case__ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__SCREAMING_SNAKE_CASE : List[str] = text_generator('''This is a test''' , do_sample=snake_case__ , num_return_sequences=2 , return_tensors=snake_case__ )
self.assertEqual(
snake_case__ , [
{'''generated_token_ids''': ANY(snake_case__ )},
{'''generated_token_ids''': ANY(snake_case__ )},
] , )
__SCREAMING_SNAKE_CASE : Dict = text_generator.model.config.eos_token_id
__SCREAMING_SNAKE_CASE : Optional[Any] = '''<pad>'''
__SCREAMING_SNAKE_CASE : Any = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=snake_case__ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case__ , )
self.assertEqual(
snake_case__ , [
[
{'''generated_token_ids''': ANY(snake_case__ )},
{'''generated_token_ids''': ANY(snake_case__ )},
],
[
{'''generated_token_ids''': ANY(snake_case__ )},
{'''generated_token_ids''': ANY(snake_case__ )},
],
] , )
@require_tf
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__SCREAMING_SNAKE_CASE : Tuple = text_generator('''This is a test''' , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__SCREAMING_SNAKE_CASE : Dict = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] , _A : Union[str, Any] , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = TextGenerationPipeline(model=snake_case__ , tokenizer=snake_case__ )
return text_generator, ["This is a test", "Another test"]
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = '''Hello I believe in'''
__SCREAMING_SNAKE_CASE : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator(snake_case__ )
self.assertEqual(
snake_case__ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__SCREAMING_SNAKE_CASE : str = text_generator(snake_case__ , stop_sequence=''' fe''' )
self.assertEqual(snake_case__ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCAmelCase__ ( self : str , _A : str , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = text_generator.model
__SCREAMING_SNAKE_CASE : int = text_generator.tokenizer
__SCREAMING_SNAKE_CASE : Tuple = text_generator('''This is a test''' )
self.assertEqual(snake_case__ , [{'''generated_text''': ANY(snake_case__ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__SCREAMING_SNAKE_CASE : Tuple = text_generator('''This is a test''' , return_full_text=snake_case__ )
self.assertEqual(snake_case__ , [{'''generated_text''': ANY(snake_case__ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(task='''text-generation''' , model=snake_case__ , tokenizer=snake_case__ , return_full_text=snake_case__ )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator('''This is a test''' )
self.assertEqual(snake_case__ , [{'''generated_text''': ANY(snake_case__ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__SCREAMING_SNAKE_CASE : str = text_generator('''This is a test''' , return_full_text=snake_case__ )
self.assertEqual(snake_case__ , [{'''generated_text''': ANY(snake_case__ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__SCREAMING_SNAKE_CASE : Any = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{'''generated_text''': ANY(snake_case__ )}, {'''generated_text''': ANY(snake_case__ )}],
[{'''generated_text''': ANY(snake_case__ )}, {'''generated_text''': ANY(snake_case__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__SCREAMING_SNAKE_CASE : str = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{'''generated_text''': ANY(snake_case__ )}, {'''generated_text''': ANY(snake_case__ )}],
[{'''generated_text''': ANY(snake_case__ )}, {'''generated_text''': ANY(snake_case__ )}],
] , )
with self.assertRaises(snake_case__ ):
__SCREAMING_SNAKE_CASE : Tuple = text_generator('''test''' , return_full_text=snake_case__ , return_text=snake_case__ )
with self.assertRaises(snake_case__ ):
__SCREAMING_SNAKE_CASE : Dict = text_generator('''test''' , return_full_text=snake_case__ , return_tensors=snake_case__ )
with self.assertRaises(snake_case__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = text_generator('''test''' , return_text=snake_case__ , return_tensors=snake_case__ )
# Empty prompt is slightly special
# it requires a BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__SCREAMING_SNAKE_CASE : List[str] = text_generator('''''' )
self.assertEqual(snake_case__ , [{'''generated_text''': ANY(snake_case__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__SCREAMING_SNAKE_CASE : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and controlling long
# generation with only max_length would require fancy calculation,
# so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__SCREAMING_SNAKE_CASE : Dict = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
__SCREAMING_SNAKE_CASE : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(snake_case__ ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
__SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__SCREAMING_SNAKE_CASE : int = pipe('''This is a test''' )
self.assertEqual(
snake_case__ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
__SCREAMING_SNAKE_CASE : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__SCREAMING_SNAKE_CASE : Dict = pipe('''This is a test''' )
self.assertEqual(
snake_case__ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__SCREAMING_SNAKE_CASE : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe('''This is a test''' )
self.assertEqual(
snake_case__ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=snake_case__ , top_p=0.5 )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''Hello world'''
__SCREAMING_SNAKE_CASE : Dict = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger('''transformers.generation.utils''' )
__SCREAMING_SNAKE_CASE : Dict = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(snake_case__ ) as cl:
__SCREAMING_SNAKE_CASE : Any = text_generator(snake_case__ , max_length=10 , max_new_tokens=1 )
self.assertIn(snake_case__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(snake_case__ ) as cl:
__SCREAMING_SNAKE_CASE : List[str] = text_generator(snake_case__ , max_new_tokens=1 )
self.assertNotIn(snake_case__ , cl.out )
with CaptureLogger(snake_case__ ) as cl:
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator(snake_case__ , max_length=10 )
self.assertNotIn(snake_case__ , cl.out )
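# Minimal sketch of the deterministic-generation pattern the tests above
# rely on, using the same tiny test checkpoint; greedy decoding
# (do_sample=False) is what makes the expected strings reproducible.
if __name__ == "__main__":
    demo = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    print(demo("Hello I believe in", do_sample=False, max_new_tokens=5)[0]["generated_text"])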
| 74 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A = logging.get_logger(__name__)
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCAmelCase_ = ['pixel_values']
def __init__( self : List[str] , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_5_5 , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : bool = True , **snake_case__ : List[Any] , ) -> None:
super().__init__(**snake_case__ )
_lowerCamelCase = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
_lowerCamelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCamelCase = do_convert_rgb
def _snake_case ( self : Union[str, Any] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Dict , ) -> np.ndarray:
_lowerCamelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_lowerCamelCase = (size['height'], size['width'])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _snake_case ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] , ) -> int:
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _snake_case ( self : Optional[Any] , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] , ) -> np.ndarray:
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _snake_case ( self : List[str] , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : PILImageResampling = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : bool = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : str , ) -> PIL.Image.Image:
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase = image_std if image_std is not None else self.image_std
_lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
_lowerCamelCase = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
_lowerCamelCase = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
_lowerCamelCase = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case__ )
return encoded_outputs
| 544 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCamelCase ( self : Optional[Any] , snake_case_ : str ):
snake_case__ : Optional[Any] = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ , config_name=snake_case_ )
snake_case__ : Tuple = GenerationConfig.from_pretrained(snake_case_ , config_name=snake_case_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , snake_case_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , snake_case_ )
def lowerCamelCase ( self : Dict ):
snake_case__ : Tuple = AutoConfig.from_pretrained("""gpt2""" )
snake_case__ : Tuple = GenerationConfig.from_model_config(snake_case_ )
snake_case__ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(snake_case_ , snake_case_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCamelCase ( self : int ):
snake_case__ : List[Any] = GenerationConfig()
snake_case__ : int = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
snake_case__ : str = copy.deepcopy(snake_case_ )
snake_case__ : Dict = generation_config.update(**snake_case_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(snake_case_ , snake_case_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(snake_case_ , {"""foo""": """bar"""} )
def lowerCamelCase ( self : str ):
snake_case__ : List[Any] = GenerationConfig()
snake_case__ : Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(snake_case_ )
snake_case__ : str = GenerationConfig.from_pretrained(snake_case_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
snake_case__ : Tuple = GenerationConfig.from_model_config(snake_case_ )
assert not hasattr(snake_case_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCamelCase ( self : int ):
snake_case__ : Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , snake_case_ )
self.assertEqual(default_config.num_beams , 1 )
snake_case__ : Any = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , snake_case_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ )
snake_case__ : str = GenerationConfig.from_pretrained(snake_case_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , snake_case_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCamelCase ( cls : Optional[Any] ):
snake_case__ : Optional[Any] = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def lowerCamelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCamelCase ( self : Any ):
snake_case__ : Any = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
snake_case__ : str = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case_ , repo_id="""test-generation-config""" , push_to_hub=snake_case_ , use_auth_token=self._token )
snake_case__ : Optional[int] = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Any = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
snake_case__ : Tuple = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=snake_case_ , use_auth_token=self._token )
snake_case__ : Any = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
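# Minimal sketch of the save/load round-trip exercised above: parameters
# survive a save_pretrained / from_pretrained cycle unchanged.
if __name__ == "__main__":
    _cfg = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as _tmp:
        _cfg.save_pretrained(_tmp)
        _loaded = GenerationConfig.from_pretrained(_tmp)
    assert _loaded.temperature == 0.7 and _loaded.do_sample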
| 301 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> bool:
snake_case__ : Tuple = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __snake_case( _lowerCAmelCase = 5_000 ) -> int:
snake_case__ : Any = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
snake_case__ : Any = pentagonal_nums[j]
snake_case__ : Any = pentagonal_i + pentagonal_j
snake_case__ : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(F"{solution() = }")
| 301 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
@dataclass
class _snake_case:
__snake_case: List[str] = 42
__snake_case: Optional[int] = field(default_factory=lowerCamelCase__ )
__snake_case: str = field(default_factory=lowerCamelCase__ )
def _UpperCamelCase (self : Dict , a : Union[str, Any] , a : Tuple , a : Tuple ) -> Tuple:
"""simple docstring"""
A__ = len(list(m.modules() ) ) == 1 or isinstance(__lowerCamelCase , nn.Convad ) or isinstance(__lowerCamelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__lowerCamelCase )
def __call__(self : Union[str, Any] , a : str ) -> Optional[Any]:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__lowerCamelCase )
[x.remove() for x in self.handles]
return self
@property
def _UpperCamelCase (self : List[str] ) -> Dict:
"""simple docstring"""
return list(filter(lambda a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
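# Minimal sketch of the forward-hook tracing idea the Tracker above uses
# (with a simplified "no submodules" test): record every leaf module that
# fires during one forward pass, then remove the hooks.
def _trace_leaves(model, x):
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module, no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)
    for h in handles:
        h.remove()
    return traced
# _trace_leaves(nn.Sequential(nn.Linear(4, 4), nn.ReLU()), torch.randn(1, 4)) -> [Linear, ReLU]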
@dataclass
class _snake_case:
__snake_case: Tuple = 42
__snake_case: str = 42
__snake_case: Any = 1
__snake_case: str = field(default_factory=lowerCamelCase__ )
__snake_case: str = field(default_factory=lowerCamelCase__ )
__snake_case: Any = True
def __call__(self : Union[str, Any] , a : int ) -> Union[str, Any]:
"""simple docstring"""
A__ = Tracker(self.dest )(__lowerCamelCase ).parametrized
A__ = Tracker(self.src )(__lowerCamelCase ).parametrized
A__ = list(filter(lambda a : type(__lowerCamelCase ) not in self.src_skip , __lowerCamelCase ) )
A__ = list(filter(lambda a : type(__lowerCamelCase ) not in self.dest_skip , __lowerCamelCase ) )
if len(__lowerCamelCase ) != len(__lowerCamelCase ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(__lowerCamelCase )} operations while"""
f""" destination module has {len(__lowerCamelCase )}.""" )
for dest_m, src_m in zip(__lowerCamelCase , __lowerCamelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _snake_case( nn.Module ):
def __init__(self : List[str] , a : List[str] ) -> List[Any]:
"""simple docstring"""
super().__init__()
A__ = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f"""Unexpected layer name {k}"""
A__ = len(__lowerCamelCase ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
A__ = nn.ModuleDict(__lowerCamelCase )
def _UpperCamelCase (self : List[Any] , a : Optional[int] ) -> int:
"""simple docstring"""
return get_trunk_forward_outputs(
__lowerCamelCase , out_feat_keys=__lowerCamelCase , feature_blocks=self._feature_blocks , )
class _snake_case( lowerCamelCase__ ):
def _UpperCamelCase (self : Tuple , a : int ) -> str:
"""simple docstring"""
A__ = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self : Tuple , a : Optional[Any] ) -> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
if x not in self:
A__ = self.convert_name_to_timm(__lowerCamelCase )
A__ = partial(lambda: (timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ).eval(), None) )
else:
A__ = super().__getitem__(__lowerCamelCase )
return val
class _snake_case( lowerCamelCase__ ):
def __getitem__(self : Any , a : str ) -> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
A__ = RegNetModel
else:
A__ = RegNetForImageClassification
return val
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
for from_key, to_key in keys:
A__ = from_state_dict[from_key].clone()
print(F"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase = True ,):
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
A__ = from_model_func()
A__ = our_model_func(lowerCamelCase_ ).eval()
A__ = ModuleTransfer(src=lowerCamelCase_ ,dest=lowerCamelCase_ ,raise_if_mismatch=lowerCamelCase_ )
A__ = torch.randn((1, 3, 224, 224) )
module_transfer(lowerCamelCase_ )
if from_state_dict is not None:
A__ = []
        # for seer models finetuned on in1k we have to manually copy the head
if "seer" in name and "in1k" in name:
A__ = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
A__ = manually_copy_vissl_head(lowerCamelCase_ ,our_model.state_dict() ,lowerCamelCase_ )
our_model.load_state_dict(lowerCamelCase_ )
A__ = our_model(lowerCamelCase_ ,output_hidden_states=lowerCamelCase_ )
A__ = (
our_outputs.logits if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else our_outputs.last_hidden_state
)
A__ = from_model(lowerCamelCase_ )
A__ = from_output[-1] if type(lowerCamelCase_ ) is list else from_output
        # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
A__ = our_outputs.hidden_states[-1]
assert torch.allclose(lowerCamelCase_ ,lowerCamelCase_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name ,commit_message='Add model' ,use_temp_dir=lowerCamelCase_ ,)
A__ = 224 if '''seer''' not in name else 384
# we can use the convnext one
A__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ,size=lowerCamelCase_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name ,commit_message='Add image processor' ,use_temp_dir=lowerCamelCase_ ,)
print(F"""Pushed {name}""" )
def _A ( UpperCAmelCase ,UpperCAmelCase = None ,UpperCAmelCase = True ):
'''simple docstring'''
A__ = '''imagenet-1k-id2label.json'''
A__ = 1000
A__ = (1, num_labels)
A__ = '''huggingface/label-files'''
A__ = num_labels
A__ = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ ,lowerCamelCase_ ,repo_type='dataset' ) ) ,'r' ) )
A__ = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = partial(lowerCamelCase_ ,num_labels=lowerCamelCase_ ,idalabel=lowerCamelCase_ ,labelaid=lowerCamelCase_ )
A__ = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] ,hidden_sizes=[24, 56, 152, 368] ,groups_width=8 ,layer_type='x' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] ,hidden_sizes=[32, 64, 160, 384] ,groups_width=16 ,layer_type='x' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] ,hidden_sizes=[48, 96, 240, 528] ,groups_width=24 ,layer_type='x' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] ,hidden_sizes=[64, 128, 288, 672] ,groups_width=16 ,layer_type='x' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] ,hidden_sizes=[72, 168, 408, 912] ,groups_width=24 ,layer_type='x' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] ,hidden_sizes=[96, 192, 432, 1008] ,groups_width=48 ,layer_type='x' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] ,hidden_sizes=[80, 240, 560, 1360] ,groups_width=40 ,layer_type='x' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] ,hidden_sizes=[168, 392, 784, 1624] ,groups_width=56 ,layer_type='x' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] ,hidden_sizes=[80, 240, 720, 1920] ,groups_width=120 ,layer_type='x' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] ,hidden_sizes=[224, 448, 896, 2240] ,groups_width=112 ,layer_type='x' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] ,hidden_sizes=[256, 512, 896, 2048] ,groups_width=128 ,layer_type='x' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] ,hidden_sizes=[336, 672, 1344, 2520] ,groups_width=168 ,layer_type='x' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] ,hidden_sizes=[24, 56, 152, 368] ,groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] ,hidden_sizes=[48, 104, 208, 440] ,groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] ,hidden_sizes=[48, 112, 256, 608] ,groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] ,hidden_sizes=[64, 128, 320, 768] ,groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] ,hidden_sizes=[48, 120, 336, 888] ,groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] ,hidden_sizes=[72, 216, 576, 1512] ,groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] ,hidden_sizes=[128, 192, 512, 1088] ,groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] ,hidden_sizes=[144, 288, 576, 1296] ,groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] ,hidden_sizes=[168, 448, 896, 2016] ,groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] ,hidden_sizes=[224, 448, 896, 2240] ,groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] ,hidden_sizes=[224, 448, 1232, 3024] ,groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] ,hidden_sizes=[232, 696, 1392, 3712] ,groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] ,hidden_sizes=[232, 696, 1392, 3712] ,groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] ,hidden_sizes=[328, 984, 1968, 4920] ,groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[528, 1056, 2904, 7392] ,groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] ,hidden_sizes=[640, 1696, 2544, 5088] ,groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[2020, 4040, 11110, 28280] ,groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] ,hidden_sizes=[232, 696, 1392, 3712] ,groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] ,hidden_sizes=[328, 984, 1968, 4920] ,groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[528, 1056, 2904, 7392] ,groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] ,hidden_sizes=[640, 1696, 2544, 5088] ,groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[2020, 4040, 11110, 28280] ,groups_width=1010 ),
}
A__ = NameToOurModelFuncMap()
A__ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(UpperCAmelCase ,UpperCAmelCase ) -> Tuple[nn.Module, Dict]:
A__ = torch.hub.load_state_dict_from_url(lowerCamelCase_ ,model_dir=str(lowerCamelCase_ ) ,map_location='cpu' )
A__ = model_func()
# check if we have a head, if yes add it
A__ = files['''classy_state_dict''']['''base_model''']['''model''']
A__ = model_state_dict['''trunk''']
model.load_state_dict(lowerCamelCase_ )
return model.eval(), model_state_dict["heads"]
# pretrained
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' ,lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) ,)
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' ,lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 ,group_width=1010 ,w_0=1744 ,w_a=6_20.83 ,w_m=2.52 ) ) ) ,)
# IN1K finetuned
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' ,lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) ,)
A__ = partial(
lowerCamelCase_ ,'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' ,lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 ,group_width=1010 ,w_0=1744 ,w_a=6_20.83 ,w_m=2.52 ) ) ) ,)
if model_name:
convert_weight_and_push(
lowerCamelCase_ ,names_to_from_model_map[model_name] ,names_to_ours_model_map[model_name] ,names_to_config[model_name] ,lowerCamelCase_ ,lowerCamelCase_ ,)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCamelCase_ ,names_to_from_model_map[model_name] ,names_to_ours_model_map[model_name] ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,)
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert; it must be one of the supported regnet* architectures,'''
            ''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 531 |
def lowerCAmelCase__ ( lowerCamelCase_ : Dict):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = len(lowerCamelCase_)
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase__ : Tuple = arr.index(max(arr[0:cur]))
# Reverse from 0 to mi
lowerCAmelCase__ : Optional[int] = arr[mi::-1] + arr[mi + 1 : len(lowerCamelCase_)]
# Reverse whole list
lowerCAmelCase__ : Dict = arr[cur - 1 :: -1] + arr[cur : len(lowerCamelCase_)]
cur -= 1
return arr
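# Example: pancake_sort([3, 2, 4, 1]) returns [1, 2, 3, 4]. Each pass flips the
# current maximum to the front, then flips it into its final slot, so sorting n
# items needs at most 2 * (n - 1) flips.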
if __name__ == "__main__":
__snake_case : List[Any] =input('Enter numbers separated by a comma:\n').strip()
__snake_case : Dict =[int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 647 | 0 |
"""simple docstring"""
import heapq
import sys
import numpy as np
lowerCamelCase__ = tuple[int, int]
class A__ :
def __init__( self ):
__lowerCAmelCase : int = []
__lowerCAmelCase : List[str] = set()
def __lowerCamelCase ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def __lowerCamelCase ( self ):
return len(self.elements ) == 0
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_SCREAMING_SNAKE_CASE )
else:
# update
# print("update", item)
__lowerCAmelCase : Optional[Any] = []
((__lowerCAmelCase) , (__lowerCAmelCase)) : int = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowerCAmelCase) , (__lowerCAmelCase)) : List[Any] = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if item in self.set:
self.set.remove(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = []
((__lowerCAmelCase) , (__lowerCAmelCase)) : int = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def __lowerCamelCase ( self ):
return self.elements[0][1]
def __lowerCamelCase ( self ):
((__lowerCAmelCase) , (__lowerCAmelCase)) : Any = heapq.heappop(self.elements )
self.set.remove(_SCREAMING_SNAKE_CASE )
return (priority, item)
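# The queue above deduplicates entries via its companion set; updating an item's
# priority pops elements until the item surfaces and pushes the rest back, so a
# single update costs O(n log n) in the worst case.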
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# euclidean distance
__lowerCAmelCase : Optional[Any] = np.array(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# integer division by time variable
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
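# Dividing the consistent heuristic by the global counter t makes this estimate
# deliberately inconsistent; multi-heuristic A* pairs such heuristics with the
# consistent anchor search (heuristic index 0).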
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
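# key(s, i) is the weighted-A* priority g(s) + W1 * h_i(s); the anchor queue uses
# i == 0, and an inadmissible queue i is only expanded while its minkey stays
# within W2 times the anchor's minkey (see multi_a_star below).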
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Dict = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
__lowerCAmelCase : List[Any] = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowerCAmelCase : Tuple = '#'
__lowerCAmelCase : Tuple = '-'
__lowerCAmelCase : Dict = back_pointer[goal]
while x != start:
((__lowerCAmelCase) , (__lowerCAmelCase)) : str = x
# print(x)
__lowerCAmelCase : Optional[Any] = '-'
__lowerCAmelCase : List[str] = back_pointer[x]
__lowerCAmelCase : Optional[int] = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
__lowerCAmelCase : Optional[Any] = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
__lowerCAmelCase : Optional[Any] = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase (_UpperCamelCase ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((__lowerCAmelCase) , (__lowerCAmelCase)) : Dict = s
__lowerCAmelCase : Optional[int] = (x - 1, y)
__lowerCAmelCase : Dict = (x + 1, y)
__lowerCAmelCase : int = (x, y + 1)
__lowerCAmelCase : Optional[int] = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
__lowerCAmelCase : Optional[int] = -1
__lowerCAmelCase : str = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowerCAmelCase : str = g_function[s] + 1
__lowerCAmelCase : Tuple = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ():
__lowerCAmelCase : int = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowerCamelCase__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowerCamelCase__ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
lowerCamelCase__ = make_common_ground()
lowerCamelCase__ = blocks_blk
# hyper parameters
lowerCamelCase__ = 1
lowerCamelCase__ = 1
lowerCamelCase__ = 20
lowerCamelCase__ = 3 # one consistent and two other inconsistent
# start and end destination
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (n - 1, n - 1)
lowerCamelCase__ = 1
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = {start: 0, goal: float('inf' )}
__lowerCAmelCase : Any = {start: -1, goal: -1}
__lowerCAmelCase : Optional[Any] = []
__lowerCAmelCase : Optional[int] = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
__lowerCAmelCase : list[int] = []
__lowerCAmelCase : list[int] = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
__lowerCAmelCase , __lowerCAmelCase : Tuple = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
__lowerCAmelCase : Optional[int] = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic) | 549 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( _lowerCamelCase):
A_ : Any = 'mctct'
def __init__( self , _SCREAMING_SNAKE_CASE=80_65 , _SCREAMING_SNAKE_CASE=15_36 , _SCREAMING_SNAKE_CASE=36 , _SCREAMING_SNAKE_CASE=61_44 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3_84 , _SCREAMING_SNAKE_CASE=9_20 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.3 , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=0.3 , _SCREAMING_SNAKE_CASE=0.3 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0.3 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=(7,) , _SCREAMING_SNAKE_CASE=(3,) , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="sum" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : Optional[int] = num_attention_heads
__lowerCAmelCase : str = attention_head_dim
__lowerCAmelCase : int = max_position_embeddings
__lowerCAmelCase : List[Any] = layer_norm_eps
__lowerCAmelCase : Tuple = layerdrop
__lowerCAmelCase : Dict = hidden_act
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Any = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = pad_token_id
__lowerCAmelCase : Union[str, Any] = bos_token_id
__lowerCAmelCase : int = eos_token_id
__lowerCAmelCase : Optional[int] = conv_glu_dim
__lowerCAmelCase : List[Any] = conv_dropout
__lowerCAmelCase : Tuple = num_conv_layers
__lowerCAmelCase : Tuple = input_feat_per_channel
__lowerCAmelCase : Optional[int] = input_channels
__lowerCAmelCase : str = conv_channels
__lowerCAmelCase : str = ctc_loss_reduction
__lowerCAmelCase : Tuple = ctc_zero_infinity
        # prevents config tests from failing when exporting to json
__lowerCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." ) | 549 | 1 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 621 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
"""simple docstring"""
lowerCamelCase__ : Tuple = np.array([[1, item, train_mtch[i]] for i, item in enumerate(UpperCAmelCase )] )
lowerCamelCase__ : str = np.array(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , UpperCAmelCase ) ) , x.transpose() ) , UpperCAmelCase )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
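# beta is the ordinary-least-squares solution of the normal equations,
# beta = (X^T X)^(-1) X^T y, for the design matrix X = [1, date, match_count];
# the forecast is the dot product of beta with the test row.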
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = (1, 2, 1)
lowerCamelCase__ : List[str] = (1, 1, 0, 7)
lowerCamelCase__ : Union[str, Any] = SARIMAX(
UpperCAmelCase , exog=UpperCAmelCase , order=UpperCAmelCase , seasonal_order=UpperCAmelCase )
lowerCamelCase__ : int = model.fit(disp=UpperCAmelCase , maxiter=600 , method='''nm''' )
lowerCamelCase__ : Optional[int] = model_fit.predict(1 , len(UpperCAmelCase ) , exog=[test_match] )
return result[0]
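# order=(1, 2, 1) with seasonal_order=(1, 1, 0, 7) fits a weekly-seasonal SARIMAX;
# the match count enters as the exogenous regressor at both fit and forecast time.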
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
"""simple docstring"""
lowerCamelCase__ : Dict = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = regressor.predict(UpperCAmelCase )
return y_pred[0]
def _a ( UpperCAmelCase ) -> float:
"""simple docstring"""
train_user.sort()
lowerCamelCase__ : Any = np.percentile(UpperCAmelCase , 25 )
lowerCamelCase__ : Any = np.percentile(UpperCAmelCase , 75 )
    lowerCamelCase__ : Optional[Any] = q3 - q1
    lowerCamelCase__ : Any = q1 - (iqr * 0.1)
return low_lim
def _a ( UpperCAmelCase , UpperCAmelCase ) -> bool:
"""simple docstring"""
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Any = 0
for i in list_vote:
if i > actual_result:
lowerCamelCase__ : List[str] = not_safe + 1
else:
if abs(abs(UpperCAmelCase ) - abs(UpperCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
_A : Dict = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
_A : Optional[int] = Normalizer().fit_transform(data_input_df.values)
# split data
_A : str = normalize_df[:, 2].tolist()
_A : Union[str, Any] = normalize_df[:, 0].tolist()
_A : Union[str, Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_A : int = normalize_df[:, [1, 2]].tolist()
_A : str = x[: len(x) - 1]
_A : Optional[Any] = x[len(x) - 1 :]
# for linear regression & sarimax
_A : Any = total_date[: len(total_date) - 1]
_A : List[str] = total_user[: len(total_user) - 1]
_A : List[str] = total_match[: len(total_match) - 1]
_A : Any = total_date[len(total_date) - 1 :]
_A : Optional[int] = total_user[len(total_user) - 1 :]
_A : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
_A : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
    _A : Union[str, Any] = '' if data_safety_checker(res_vote, tst_user[0]) else 'not '
    print(f'Today\'s data is {not_str}safe.')
| 315 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def _snake_case ( lowerCamelCase__ : np.ndarray ) -> bool:
return np.array_equal(lowerCamelCase__ , matrix.conjugate().T )
def _snake_case ( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : np.ndarray ) -> Any:
lowerCamelCase_ : Dict =v.conjugate().T
lowerCamelCase_ : List[str] =v_star.dot(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , np.ndarray )
return (v_star_dot.dot(lowerCamelCase__ )) / (v_star.dot(lowerCamelCase__ ))
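# Rayleigh quotient: R(M, v) = (v* M v) / (v* v). For a Hermitian M the quotient
# is always real and lies between the smallest and largest eigenvalues of M.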
def _snake_case ( ) -> None:
lowerCamelCase_ : Dict =np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowerCamelCase_ : Optional[Any] =np.array([[1], [2], [3]] )
assert is_hermitian(lowerCamelCase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowerCamelCase__ , lowerCamelCase__ ) )
lowerCamelCase_ : int =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowerCamelCase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowerCamelCase__ , lowerCamelCase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 244 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :torch.FloatTensor
class lowercase__ ( snake_case__, snake_case__ ):
@register_to_config
def __init__( self : Any , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : Tuple[str] = ("DownEncoderBlock2D",) , snake_case__ : Tuple[str] = ("UpDecoderBlock2D",) , snake_case__ : Tuple[int] = (64,) , snake_case__ : int = 1 , snake_case__ : str = "silu" , snake_case__ : int = 3 , snake_case__ : int = 32 , snake_case__ : int = 256 , snake_case__ : int = 32 , snake_case__ : Optional[int] = None , snake_case__ : float = 0.18_215 , snake_case__ : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase_ : Union[str, Any] =Encoder(
in_channels=snake_case__ , out_channels=snake_case__ , down_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , double_z=snake_case__ , )
lowerCamelCase_ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase_ : List[str] =nn.Convad(snake_case__ , snake_case__ , 1 )
lowerCamelCase_ : Optional[int] =VectorQuantizer(snake_case__ , snake_case__ , beta=0.25 , remap=snake_case__ , sane_index_shape=snake_case__ )
lowerCamelCase_ : List[str] =nn.Convad(snake_case__ , snake_case__ , 1 )
# pass init params to Decoder
lowerCamelCase_ : Tuple =Decoder(
in_channels=snake_case__ , out_channels=snake_case__ , up_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , norm_type=snake_case__ , )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ):
lowerCamelCase_ : Optional[int] =self.encoder(snake_case__ )
lowerCamelCase_ : Dict =self.quant_conv(snake_case__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=snake_case__ )
@apply_forward_hook
def UpperCAmelCase__ ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : bool = False , snake_case__ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : int =self.quantize(snake_case__ )
else:
lowerCamelCase_ : Optional[int] =h
lowerCamelCase_ : Optional[Any] =self.post_quant_conv(snake_case__ )
lowerCamelCase_ : List[str] =self.decoder(snake_case__ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ):
lowerCamelCase_ : List[str] =sample
lowerCamelCase_ : Any =self.encode(snake_case__ ).latents
lowerCamelCase_ : List[Any] =self.decode(snake_case__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
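# Minimal round-trip sketch (assuming the autoencoder class above is diffusers'
# VQModel with its default config):
#   vq = VQModel()
#   recon = vq(torch.randn(1, 3, 32, 32)).sample  # encode -> quantize -> decode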
| 244 | 1 |
"""simple docstring"""
import heapq
def lowercase__ ( lowerCAmelCase__ : dict ) -> set[int]:
'''simple docstring'''
a__ : list[list] = []
    # for each node and its adjacency list, add them and the node's rank to the queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCAmelCase__ , [-1 * len(lowerCAmelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
a__ : Tuple = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
a__ : Optional[Any] = heapq.heappop(lowerCAmelCase__ )[1][0]
chosen_vertices.add(lowerCAmelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the node has no adjacent nodes left, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
a__ : Union[str, Any] = elem[1][1].index(lowerCAmelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCAmelCase__ )
return chosen_vertices
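# Greedy rule: repeatedly take the vertex of highest remaining degree and delete
# its incident edges; for the sample graph below this yields {0, 1, 2, 4}.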
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") | 642 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__UpperCAmelCase = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
__UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def lowercase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]=False ) -> List[str]:
'''simple docstring'''
a__ , a__ : Optional[int] = create_model(
"HTSAT-tiny" , "roberta" , lowerCAmelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCAmelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def lowercase__ ( lowerCAmelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : Any = {}
a__ : Tuple = R".*sequential.(\d+).*"
a__ : Tuple = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
a__ : str = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
a__ : Optional[Any] = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
a__ : int = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(lowerCAmelCase__ )//3}.linear." )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Tuple = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
a__ : List[Any] = 1 if projecton_layer == 0 else 2
a__ : Dict = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
a__ : List[Any] = value
a__ : List[Any] = mixed_qkv.size(0 ) // 3
a__ : Optional[int] = mixed_qkv[:qkv_dim]
a__ : List[str] = mixed_qkv[qkv_dim : qkv_dim * 2]
a__ : Optional[Any] = mixed_qkv[qkv_dim * 2 :]
a__ : Tuple = query_layer
a__ : int = key_layer
a__ : Optional[int] = value_layer
else:
a__ : List[str] = value
return model_state_dict
def lowercase__ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple=False ) -> Tuple:
'''simple docstring'''
a__ , a__ : Tuple = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
a__ : Optional[int] = clap_model.state_dict()
a__ : Optional[Any] = rename_state_dict(lowerCAmelCase__ )
a__ : Union[str, Any] = ClapConfig()
a__ : Dict = enable_fusion
a__ : Any = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__UpperCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 642 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : str = False
def __A ( a_ : Namespace )-> Tuple:
'''simple docstring'''
return TrainCommand(a_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( lowerCamelCase_ :ArgumentParser ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=lowerCamelCase_ , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=lowerCamelCase_ , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=lowerCamelCase_ , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=lowerCamelCase_ , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=lowerCamelCase_ , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=lowerCamelCase_ , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=lowerCamelCase_ , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=lowerCamelCase_ , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=lowerCamelCase_ , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=lowerCamelCase_ , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=lowerCamelCase_ , default=3E-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=lowerCamelCase_ , default=1E-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self :Union[str, Any] , lowerCamelCase_ :Namespace ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE : Optional[int] = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = args.output
SCREAMING_SNAKE_CASE : List[str] = args.column_label
SCREAMING_SNAKE_CASE : Dict = args.column_text
SCREAMING_SNAKE_CASE : Dict = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE : Dict = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE : Any = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE : Dict = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE : List[Any] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE : Any = args.validation_split
SCREAMING_SNAKE_CASE : Union[str, Any] = args.train_batch_size
SCREAMING_SNAKE_CASE : List[Any] = args.valid_batch_size
SCREAMING_SNAKE_CASE : List[str] = args.learning_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = args.adam_epsilon
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
raise NotImplementedError
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
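# A typical invocation of the subcommand registered above (file paths hypothetical):
#   transformers-cli train --train_data train.csv --task text_classification --output ./out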
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
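# Static equilibrium is checked through the net moment only: the z-components of
# the r x F cross products are summed and the system passes when |sum| < eps.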
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
class snake_case :
lowerCAmelCase__ :Dict = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCAmelCase__ :Union[str, Any] = field(
default=__UpperCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase__ :str = field(
default=__UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
lowerCAmelCase__ :int = field(
default=__UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase__ :int = field(
default=__UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowerCAmelCase__ :Any = field(
default=__UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class snake_case :
lowerCAmelCase__ :Tuple = field(
default=__UpperCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase__ :Optional[int] = field(
default=__UpperCamelCase , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
lowerCAmelCase__ :Optional[int] = field(
default=__UpperCamelCase , metadata={"help": "Train language if it is different from the evaluation language."} )
lowerCAmelCase__ :Dict = field(
default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase__ :List[Any] = field(
default=__UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase__ :Tuple = field(
default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase__ :int = field(
default=__UpperCamelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
lowerCAmelCase__ :Optional[Any] = field(
default=__UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCAmelCase__ :List[Any] = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase__ :int = field(
default=__UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase__ :Dict = field(
default=__UpperCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" ,SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowercase__ = load_dataset(
"xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
lowercase__ = load_dataset(
"xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = train_dataset.features['label'].names
if training_args.do_eval:
lowercase__ = load_dataset(
"xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = eval_dataset.features['label'].names
if training_args.do_predict:
lowercase__ = load_dataset(
"xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = predict_dataset.features['label'].names
# Labels
lowercase__ = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=SCREAMING_SNAKE_CASE__ ,idalabel={str(SCREAMING_SNAKE_CASE__ ): label for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,finetuning_task="xnli" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase__ = False
def preprocess_function(_snake_case : Union[str, Any] ):
# Tokenize the texts
return tokenizer(
examples["premise"] ,examples["hypothesis"] ,padding=SCREAMING_SNAKE_CASE__ ,max_length=data_args.max_seq_length ,truncation=SCREAMING_SNAKE_CASE__ ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__ = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_train_samples )
lowercase__ = train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowercase__ = train_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on train dataset" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ) ,3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__ = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_eval_samples )
lowercase__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowercase__ = eval_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on validation dataset" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowercase__ = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_predict_samples )
lowercase__ = predict_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowercase__ = predict_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on prediction dataset" ,)
# Get the metric function
lowercase__ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : Any ):
lowercase__ = p.predictions[0] if isinstance(p.predictions ,SCREAMING_SNAKE_CASE__ ) else p.predictions
lowercase__ = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ = default_data_collator
 elif training_args.fp16:
lowercase__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=8 )
else:
lowercase__ = None
# Initialize our Trainer
lowercase__ = Trainer(
model=SCREAMING_SNAKE_CASE__ ,args=SCREAMING_SNAKE_CASE__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,data_collator=SCREAMING_SNAKE_CASE__ ,)
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
lowercase__ = train_result.metrics
lowercase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowercase__ = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("train" ,SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowercase__ = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
lowercase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
lowercase__ = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("eval" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("eval" ,SCREAMING_SNAKE_CASE__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
lowercase__ = trainer.predict(SCREAMING_SNAKE_CASE__ ,metric_key_prefix="predict" )
lowercase__ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowercase__ = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("predict" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("predict" ,SCREAMING_SNAKE_CASE__ )
lowercase__ = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
lowercase__ = os.path.join(training_args.output_dir ,"predictions.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ ,"w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase__ = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
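# --- Added illustration (not part of the original script) ---
# The padding strategy chosen above trades memory for simplicity: "max_length"
# pads every example up front, while deferring to the collator pads each batch
# only to its own longest member (rounded to a multiple of 8 under fp16).
# A minimal, self-contained sketch of that batch-time padding; the names
# `pad_batch` and `toy_batch` are hypothetical, for illustration only.
def pad_batch(batch, pad_id=0, multiple_of=None):
    longest = max(len(seq) for seq in batch)
    if multiple_of:  # e.g. 8, to keep fp16 tensor cores efficient
        longest = ((longest + multiple_of - 1) // multiple_of) * multiple_of
    return [seq + [pad_id] * (longest - len(seq)) for seq in batch]
toy_batch = [[101, 7592, 102], [101, 7592, 2088, 999, 102]]
assert pad_batch(toy_batch, multiple_of=8) == [
    [101, 7592, 102, 0, 0, 0, 0, 0],
    [101, 7592, 2088, 999, 102, 0, 0, 0],
]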
| 267 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
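# --- Added illustration (not part of the original script) ---
# The four checks above classify problem paths independently; a hedged,
# self-contained restatement for a single path (`classify_path` is a
# hypothetical helper, not part of the repository tooling):
def classify_path(path):
    problems = []
    if path != path.lower():
        problems.append('uppercase')
    if ' ' in path:
        problems.append('space')
    if '-' in path:
        problems.append('hyphen')
    if os.sep not in path:
        problems.append('no directory')
    return problems
assert classify_path('maths' + os.sep + 'My Sort-1.py') == ['uppercase', 'space', 'hyphen']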
| 669 | 0 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum ):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception ):
    pass
class UnexpectedDownloadedFile(ChecksumVerificationException ):
    pass
class ExpectedMoreDownloadedFiles(ChecksumVerificationException ):
    pass
class NonMatchingChecksumError(ChecksumVerificationException ):
    pass
def verify_checksums(expected_checksums : Optional[dict] , recorded_checksums : dict , verification_name=None ) -> None:
    if expected_checksums is None:
        logger.info('''Unable to verify checksums.''' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ''' for ''' + verification_name if verification_name is not None else ''''''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F"Checksums didn't match{for_verification_name}:\n"
            F"{bad_urls}\n"
            '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
    logger.info('''All the checksums matched successfully''' + for_verification_name )
class SplitsVerificationException(Exception ):
    pass
class UnexpectedSplits(SplitsVerificationException ):
    pass
class ExpectedMoreSplits(SplitsVerificationException ):
    pass
class NonMatchingSplitsSizesError(SplitsVerificationException ):
    pass
def verify_splits(expected_splits : Optional[dict] , recorded_splits : dict ) -> None:
    if expected_splits is None:
        logger.info('''Unable to verify splits sizes.''' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('''All the splits matched successfully.''' )
def get_size_checksum_dict(path : str , record_checksum : bool = True ) -> dict:
    if record_checksum:
        m = sha256()
        with open(path , '''rb''' ) as f:
            for chunk in iter(lambda: f.read(1 << 2_0 ) , b'''''' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size ) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
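# --- Added usage sketch (not part of the original module) ---
# How the helpers above fit together, assuming this module's relative imports
# resolve (so the guard below is illustrative rather than directly runnable as
# a standalone script); the URL key is hypothetical.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"hello datasets")
        tmp_path = tmp.name
    recorded = {"https://example.com/data.bin": get_size_checksum_dict(tmp_path)}
    expected = {"https://example.com/data.bin": dict(recorded["https://example.com/data.bin"])}
    verify_checksums(expected, recorded)  # identical sizes and checksums: passes
    os.remove(tmp_path)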
| 703 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
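# --- Added illustration (not part of the original tests) ---
# Why "lower" tokenizes to ["low", "er</w>"]: with merges "l o", "lo w" and
# "e r</w>", greedy BPE repeatedly applies the highest-priority adjacent merge.
# A minimal, hedged re-implementation of that loop (`toy_bpe` is hypothetical):
def toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {pair: i for i, pair in enumerate(merges)}
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols
assert toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]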
| 216 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_UpperCAmelCase : str = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
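# --- Added usage sketch (not part of the original module) ---
# A dataclass like the one above is normally consumed through HfArgumentParser;
# the flag values below are hypothetical and require torch to be installed.
if __name__ == "__main__":
    from transformers import HfArgumentParser
    parser = HfArgumentParser(Seq2SeqTrainingArguments)
    (training_args,) = parser.parse_args_into_dataclasses(
        ["--output_dir", "/tmp/out", "--sortish_sampler", "--label_smoothing", "0.1"]
    )
    print(training_args.sortish_sampler, training_args.label_smoothing)  # True 0.1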
| 72 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache(Generic[T] ):
    '''simple docstring'''
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
    def __repr__( self ):
        return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
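# --- Added note (not part of the original file) ---
# Design note: the deque keeps recency order (appendleft on every access) while
# the set gives O(1) membership tests; deque.remove is still O(n), so this is a
# teaching implementation rather than a constant-time LRU (which would pair a
# dict with a doubly linked list).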
| 594 | 0 |
from math import factorial
class Dual:
    def __init__( self , real , rank ):
        """simple docstring"""
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ):
        """simple docstring"""
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
        )
    def reduce( self ):
        """simple docstring"""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
        """simple docstring"""
        return self + other * -1
    def __mul__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        """simple docstring"""
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate(func , position , order ):
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
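# --- Added worked check (not part of the original file) ---
# f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and f''(9) = 30 * 9**4
# = 196830, which is exactly what the print below should show.
assert differentiate(lambda y: y**2 * y**4, 9, 2) == 30 * 9**4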
def f(y ):
    return y**2 * y**4
print(differentiate(f, 9, 2)) | 712 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main() | 306 | 0 |
def _print_dist(dist , v ) -> None:
    """simple docstring"""
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()
def floyd_warshall(graph , v ):
    """simple docstring"""
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
__A : Dict = int(input("Enter number of vertices: "))
__A : Union[str, Any] = int(input("Enter number of edges: "))
__A : List[str] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__A : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__A : Union[str, Any] = int(input("Enter source:"))
__A : List[str] = int(input("Enter destination:"))
__A : Union[str, Any] = float(input("Enter weight:"))
__A : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
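# --- Added check (not part of the original script) ---
# Floyd-Warshall runs in O(v**3) time and O(v**2) space. A self-contained
# replay of the 3-vertex example documented above (runs after the interactive
# section and prints its own matrix via _print_dist):
INF = float("inf")
example = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
example_dist, _ = floyd_warshall(example, 3)
assert example_dist[1][2] == 2.0 and example_dist[2][1] == 1.0 and example_dist[0][1] == INF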
| 27 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig(OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
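# --- Added usage sketch (not part of the original module) ---
# Round-tripping the config above; attribute_map lets the generic names resolve
# to DistilBERT's own (`hidden_size` -> `dim`, `num_hidden_layers` -> `n_layers`).
if __name__ == "__main__":
    cfg = DistilBertConfig(n_layers=3, dim=256, hidden_dim=4 * 256)
    assert cfg.hidden_size == 256 and cfg.num_hidden_layers == 3
    assert DistilBertConfig.from_dict(cfg.to_dict()).dim == 256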
| 586 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 720 |
from torch import nn
class ClassificationHead( nn.Module ):
    '''simple docstring'''
    def __init__( self , class_size , embed_size ):
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward( self , hidden_state ):
        """simple docstring"""
        logits = self.mlp(hidden_state )
        return logits | 649 | 0 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["speech"] )
class A_ ( metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["speech"] )
| 197 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_text_dual_encoder'''] = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_text_dual_encoder'''] = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_text_dual_encoder'''] = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
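# --- Added illustration (not part of the original module) ---
# The pattern above defers submodule imports until first attribute access, so
# importing the package stays cheap even when torch/flax/tf are heavy. A
# minimal, hedged stand-in for what _LazyModule does under the hood:
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, attr):
        # import the real submodule only now, then pull the attribute off it
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)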
| 508 | 0 |
from __future__ import annotations
def slowsort(sequence : list , start : int | None = None , end : int | None = None ) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
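# --- Added check (not part of the original file) ---
# Slowsort is a deliberately pessimal "multiply and surrender" algorithm with
# super-polynomial worst-case running time; it is correct, just slow. A quick
# in-place check:
sample = [5, 1, 4, 2, 3]
slowsort(sample)
assert sample == [1, 2, 3, 4, 5]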
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data : dict )-> tuple:
    return (data["data"], data["target"])
def xgboost(features : np.ndarray , target : np.ndarray )-> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main()-> None:
    data = load_iris()
    features, targets = data_handling(data )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = data['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap='''Blues''' , normalize='''true''' , )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
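# --- Added usage sketch (not part of the original script) ---
# A hedged way to get a scalar score from the same pipeline, alongside the
# confusion-matrix plot above (`holdout_accuracy` is a hypothetical helper):
def holdout_accuracy(seed=0):
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25, random_state=seed
    )
    model = xgboost(x_train, y_train)
    return float((model.predict(x_test) == y_test).mean())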
| 526 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a_ : List[Any] = logging.get_logger(__name__)
a_ : Union[str, Any] = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
a_ : List[str] = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
a_ : Tuple = {
"jukebox": 512,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_LYRIC_TOKENS_SIZES
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : List[str] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple=["v3", "v2", "v2"] , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : Tuple=5 , snake_case__ : Optional[int]="<|endoftext|>" , **snake_case__ : List[str] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
super().__init__(
unk_token=snake_case__ , n_genres=snake_case__ , version=snake_case__ , max_n_lyric_tokens=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = version
SCREAMING_SNAKE_CASE = max_n_lyric_tokens
SCREAMING_SNAKE_CASE = n_genres
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(snake_case__ )
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(snake_case__ )
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(snake_case__ )
SCREAMING_SNAKE_CASE = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
SCREAMING_SNAKE_CASE = oov.replace(r'\-\'' , r'\-+\'' )
SCREAMING_SNAKE_CASE = regex.compile(snake_case__ )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.artists_encoder.items()}
SCREAMING_SNAKE_CASE = {v: k for k, v in self.genres_encoder.items()}
SCREAMING_SNAKE_CASE = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.artists_encoder.get(snake_case__ , 0 ) for artist in list_artists]
for genres in range(len(snake_case__ ) ):
SCREAMING_SNAKE_CASE = [self.genres_encoder.get(snake_case__ , 0 ) for genre in list_genres[genres]]
SCREAMING_SNAKE_CASE = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
SCREAMING_SNAKE_CASE = [[self.lyrics_encoder.get(snake_case__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCamelCase ( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
return list(snake_case__ )
def UpperCamelCase ( self : int , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , **snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_for_tokenization(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self._tokenize(snake_case__ )
return artist, genre, lyrics
def UpperCamelCase ( self : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
SCREAMING_SNAKE_CASE = artists[idx].lower()
SCREAMING_SNAKE_CASE = [genres[idx].lower()]
else:
SCREAMING_SNAKE_CASE = self._normalize(artists[idx] ) + '.v2'
SCREAMING_SNAKE_CASE = [
self._normalize(snake_case__ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
SCREAMING_SNAKE_CASE = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
SCREAMING_SNAKE_CASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
SCREAMING_SNAKE_CASE = {vocab[index]: index + 1 for index in range(len(snake_case__ ) )}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = len(snake_case__ ) + 1
SCREAMING_SNAKE_CASE = self.vocab
SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
SCREAMING_SNAKE_CASE = ''
else:
SCREAMING_SNAKE_CASE = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
SCREAMING_SNAKE_CASE = self._run_strip_accents(snake_case__ )
SCREAMING_SNAKE_CASE = lyrics.replace('\\' , '\n' )
SCREAMING_SNAKE_CASE = self.out_of_vocab.sub('' , snake_case__ ), [], []
return artists, genres, lyrics
def UpperCamelCase ( self : List[str] , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = unicodedata.normalize('NFD' , snake_case__ )
SCREAMING_SNAKE_CASE = []
for char in text:
SCREAMING_SNAKE_CASE = unicodedata.category(snake_case__ )
if cat == "Mn":
continue
output.append(snake_case__ )
return "".join(snake_case__ )
def UpperCamelCase ( self : int , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
[chr(snake_case__ ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(snake_case__ ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(snake_case__ ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
SCREAMING_SNAKE_CASE = frozenset(snake_case__ )
SCREAMING_SNAKE_CASE = re.compile(r'_+' )
SCREAMING_SNAKE_CASE = ''.join([c if c in accepted else '_' for c in text.lower()] )
SCREAMING_SNAKE_CASE = pattern.sub('_' , snake_case__ ).strip('_' )
return text
def UpperCamelCase ( self : int , snake_case__ : List[str] ):
"""simple docstring"""
return " ".join(snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = TensorType(snake_case__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
SCREAMING_SNAKE_CASE = tf.constant
SCREAMING_SNAKE_CASE = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
SCREAMING_SNAKE_CASE = torch.tensor
SCREAMING_SNAKE_CASE = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
SCREAMING_SNAKE_CASE = jnp.array
SCREAMING_SNAKE_CASE = _is_jax
else:
SCREAMING_SNAKE_CASE = np.asarray
SCREAMING_SNAKE_CASE = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
SCREAMING_SNAKE_CASE = [inputs]
if not is_tensor(snake_case__ ):
SCREAMING_SNAKE_CASE = as_tensor(snake_case__ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[int]="" , snake_case__ : Dict="pt" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [0, 0, 0]
SCREAMING_SNAKE_CASE = [artist] * len(self.version )
SCREAMING_SNAKE_CASE = [genres] * len(self.version )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.tokenize(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._convert_token_to_id(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = [-INFINITY] * len(full_tokens[-1] )
SCREAMING_SNAKE_CASE = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def UpperCamelCase ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case__ ) )
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case__ ) )
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case__ ) )
return (artists_file, genres_file, lyrics_file)
def UpperCamelCase ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.artists_decoder.get(snake_case__ )
SCREAMING_SNAKE_CASE = [self.genres_decoder.get(snake_case__ ) for genre in genres_index]
SCREAMING_SNAKE_CASE = [self.lyrics_decoder.get(snake_case__ ) for character in lyric_index]
return artist, genres, lyrics
| 439 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (3_2, 3_2)
SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(snake_case__ )
@property
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
def extract(*snake_case__ : List[Any] , **snake_case__ : Union[str, Any] ):
class UpperCamelCase :
def __init__( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.ones([0] )
def UpperCamelCase ( self : Any , snake_case__ : List[str] ):
"""simple docstring"""
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE = 7_7
SCREAMING_SNAKE_CASE = self.dummy_image.to(snake_case__ )
SCREAMING_SNAKE_CASE = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=snake_case__ , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=snake_case__ , return_dict=snake_case__ , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE = 7_7
SCREAMING_SNAKE_CASE = self.dummy_image.to(snake_case__ )
# put models in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = vae.half()
SCREAMING_SNAKE_CASE = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=snake_case__ , num_inference_steps=2 , output_type='np' , image=snake_case__ , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE = init_image.resize((7_6_0, 5_0_4) )
SCREAMING_SNAKE_CASE = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
SCREAMING_SNAKE_CASE = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
SCREAMING_SNAKE_CASE = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE = init_image.resize((7_6_8, 5_1_2) )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 439 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
import math
def is_prime(number) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
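# --- Added check (not part of the original file) ---
# is_prime relies on the 6k +/- 1 wheel: every prime above 3 sits next to a
# multiple of 6, so trial division only needs candidates 5, 7, 11, 13, ... up
# to sqrt(n).
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]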
| 155 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus ):
    """simple docstring"""
    def _create_dummy_data( self , data_dir ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , """w""" ) as f:
                    f.write(content )
    def _run_finetune( self , gpus : int , distributed_retriever : str = "pytorch" ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , """output""" )
        data_dir = os.path.join(tmp_dir , """data""" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , """metrics.json""" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
@require_torch_gpu
    def test_finetune_gpu( self ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
    def test_finetune_multigpu( self ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) | 45 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Optional[Any] , a_ : str ):
"""simple docstring"""
with open(a_ , encoding="utf-8" ) as input_file:
__snake_case = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__snake_case = input_file.read()
__snake_case = regexp.search(a_ )
return match
def A ( self : Any , a_ : str ):
"""simple docstring"""
with open(a_ , encoding="utf-8" ) as input_file:
__snake_case = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__snake_case = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__snake_case = regexp.finditer(a_ )
__snake_case = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = Path("./datasets" )
__snake_case = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(a_ ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = Path("./datasets" )
__snake_case = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(a_ ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 69 | 0 |
import math

import tensorflow as tf
from packaging import version


def _gelu( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))

    return x * cdf


def _gelu_new( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))

    return x * cdf


def mish( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )

    return x * tf.tanh(tf.math.softplus(x ) )


def gelu_fast( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff1 = tf.cast(0.044715 , x.dtype )
    coeff2 = tf.cast(0.7978845608 , x.dtype )

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))


def quick_gelu( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )

    return x * tf.math.sigmoid(coeff * x )


def gelu_10( x ):
    """simple docstring"""
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )


def glu( x , axis=-1 ):
    """simple docstring"""
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )


if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):

    def approximate_gelu_wrap( x ):
        """simple docstring"""
        return tf.keras.activations.gelu(x , approximate=True )

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    '''gelu''': gelu,
    '''gelu_10''': gelu_10,
    '''gelu_fast''': gelu_fast,
    '''gelu_new''': gelu_new,
    '''glu''': glu,
    '''mish''': mish,
    '''quick_gelu''': quick_gelu,
    '''relu''': tf.keras.activations.relu,
    '''sigmoid''': tf.keras.activations.sigmoid,
    '''silu''': tf.keras.activations.swish,
    '''swish''': tf.keras.activations.swish,
    '''tanh''': tf.keras.activations.tanh,
}


def get_tf_activation( activation_string ):
    """simple docstring"""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}""" )
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
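# --- For intuition, a hypothetical standalone sketch of the lazy-module pattern that
# _LazyModule implements (not the transformers implementation itself): attribute
# access triggers the real submodule import on first use.
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # the owning submodule is imported only when the attribute is first requested
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)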
| 102 | 1 |
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve( matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float

    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]


def interpolate( y_points: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix , vector )

    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )

    return interpolated_func


def question_function( variable: int ) -> int:
    '''simple docstring'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution( func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )

    return ret


if __name__ == "__main__":
    print(F'{solution() = }')
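# --- Sanity sketch for the routines above: fitting only the first two terms of
# u(n) = n**3 yields BOP(2, n) = 7n - 6, whose first incorrect term is at n = 3,
# so FIT = 15.
_cubes = [n**3 for n in range(1, 11)]
_bop2 = interpolate(_cubes[:2])
assert [_bop2(n) for n in (1, 2, 3)] == [1, 8, 15]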
| 462 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
class TvltFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , spectrogram_length=2_0_4_8 , feature_size=1_2_8 , num_audio_channels=1 , hop_length=5_1_2 , chunk_length=3_0 , sampling_rate=4_4_1_0_0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , '''spectrogram_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''feature_size''' ) )
        self.assertTrue(hasattr(feature_extractor , '''num_audio_channels''' ) )
        self.assertTrue(hasattr(feature_extractor , '''hop_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''chunk_length''' ) )
        self.assertTrue(hasattr(feature_extractor , '''sampling_rate''' ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('''mel_filters''' )
        mel_second = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('''mel_filters''' )
        mel_second = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values

        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values

        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=True ).audio_values

        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values

        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='''pt''' ).audio_values

        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )

        expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 462 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=0.6 , _lowerCamelCase=None , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = mask_ratio
lowerCAmelCase_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase_ = (image_size // patch_size) ** 2
lowerCAmelCase_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = ViTMAEModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = ViTMAEForPreTraining(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase )
lowerCAmelCase_ = (self.image_size // self.patch_size) ** 2
lowerCAmelCase_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = ViTMAEForPreTraining(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ = model(_lowerCamelCase )
lowerCAmelCase_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __a , __a , unittest.TestCase ):
__A : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__A : Optional[Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__A : str = False
__A : Any = False
__A : int = False
__A : Dict = False
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = ViTMAEModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_lowerCamelCase )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# make masks reproducible
np.random.seed(2 )
lowerCAmelCase_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase_ = torch.from_numpy(_lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase_ = pt_noise
super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
lowerCAmelCase_ = outputs[0].cpu().numpy()
lowerCAmelCase_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
lowerCAmelCase_ = model_class.from_pretrained(_lowerCamelCase )
model.to(_lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
# Make sure we don't have nans
lowerCAmelCase_ = after_outputs[0].cpu().numpy()
lowerCAmelCase_ = 0
lowerCAmelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase_ ( self ):
pass
@slow
def UpperCAmelCase_ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = ViTMAEModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCAmelCase_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_lowerCamelCase )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase_ = ViTMAEConfig()
lowerCAmelCase_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCAmelCase_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**_lowerCamelCase , noise=torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase ) )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_lowerCamelCase ) , atol=1E-4 ) )
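# --- Worked check of the masked sequence-length formula the tester above relies on:
# seq_length = ceil((1 - mask_ratio) * (num_patches + 1)), the +1 being the [CLS] token.
import math

_num_patches = (30 // 2) ** 2                                  # image_size=30, patch_size=2 -> 225
_seq_length = int(math.ceil((1 - 0.6) * (_num_patches + 1)))   # mask_ratio=0.6 -> 91
assert _seq_length == 91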
| 714 |
'''simple docstring'''
from __future__ import annotations


def prime_factors( n: int ) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
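# --- Example (uses the fixed name above): 360 = 2**3 * 3**2 * 5.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]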
| 606 | 0 |
'''simple docstring'''


def longest_common_subsequence( x: str , y: str ):
    assert x is not None
    assert y is not None
    m = len(x )
    n = len(y )

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741

    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )

    seq = ""
    i , j = m , n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = 'AGGTAB'
    b = 'GXTXAYB'
    expected_ln = 4
    expected_subseq = 'GTAB'
    ln , subseq = longest_common_subsequence(a, b)
    print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 494 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 494 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger()
@dataclass
class Tracker:
"""simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x ):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
@property
    def parametrized( self ):
        '''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
"""simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__( self , x ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized

        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )

        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )

        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
class FakeRegNetVisslWrapper(nn.Module ):
"""simple docstring"""
    def __init__( self , model ):
        '''simple docstring'''
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block" ), f'''Unexpected layer name {k}'''
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((f'''res{block_index}''', v) )

        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self , x ):
        '''simple docstring'''
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict ):
"""simple docstring"""
    def convert_name_to_timm( self , x ):
        '''simple docstring'''
        x_split = x.split("-" )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )

    def __getitem__( self , x ):
        '''simple docstring'''
        # default to timm if the name is not an explicitly registered (seer) checkpoint
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap(dict ):
"""simple docstring"""
    def __getitem__( self , x ):
        '''simple docstring'''
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'''Copied key={from_key} to={to_key}''' )
    return to_state_dict
def convert_weight_and_push( name , from_model_func , our_model_func , config , save_directory , push_to_hub = True , ):
    """simple docstring"""
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )

    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=True , )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=True , )

        print(f'''Pushed {name}''' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url , model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="cpu" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
    return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
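# --- A minimal sketch of the state-dict transfer idea that ModuleTransfer above is
# built on (toy modules with hypothetical shapes, for illustration only):
import torch
import torch.nn as nn

_src, _dest = nn.Linear(4, 4), nn.Linear(4, 4)
_dest.load_state_dict(_src.state_dict())  # copy parameters tensor-for-tensor
assert torch.equal(_src.weight, _dest.weight)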
| 503 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( A ):
"""simple docstring"""
def __init__( self : Tuple , A_ : Optional[Any] , A_ : Dict=1_3 , A_ : str=7 , A_ : Union[str, Any]=True , A_ : int=True , A_ : Any=False , A_ : str=True , A_ : int=9_9 , A_ : int=3_2 , A_ : Optional[int]=5 , A_ : List[str]=4 , A_ : int=6_4 , A_ : Optional[int]="gelu" , A_ : List[Any]=0.1 , A_ : int=0.1 , A_ : List[str]=5_1_2 , A_ : Optional[Any]=1_6 , A_ : int=2 , A_ : Optional[int]=0.02 , A_ : Any=3 , A_ : Optional[Any]=4 , A_ : Union[str, Any]=None , A_ : Union[str, Any]=2 , A_ : Tuple=2 , A_ : Optional[int]=2 , A_ : List[Any]=2 , A_ : List[str]=4 , A_ : Union[str, Any]=1 , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : Union[str, Any] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : List[Any] = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Tuple = num_labels
_lowerCAmelCase : Dict = num_choices
_lowerCAmelCase : Optional[Any] = scope
_lowerCAmelCase : Union[str, Any] = q_groups
_lowerCAmelCase : Tuple = k_groups
_lowerCAmelCase : str = v_groups
_lowerCAmelCase : Tuple = post_attention_groups
_lowerCAmelCase : Tuple = intermediate_groups
_lowerCAmelCase : List[Any] = output_groups
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : int = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __magic_name__ ( self : List[str] , A_ : Dict , A_ : Union[str, Any] , A_ : List[str] , A_ : Optional[int] , A_ : str , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertModel(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(A_ , A_ )
_lowerCAmelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Optional[int] , A_ : int , A_ : Dict , A_ : Any , A_ : List[Any] , A_ : List[Any] , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : int = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , A_ : Union[str, Any] , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] , A_ : List[str] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Tuple , A_ : Optional[int] , A_ : Dict , A_ : str , A_ : Tuple , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = SqueezeBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Any = SqueezeBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : Any = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple , A_ : Tuple , A_ : Tuple , A_ : Union[str, Any] , A_ : int , A_ : List[Any] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_choices
_lowerCAmelCase : Dict = SqueezeBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[int] = config_and_inputs
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( A , A , unittest.TestCase ):
"""simple docstring"""
_lowercase : Tuple = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowercase : Optional[Any] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : int = True
_lowercase : List[str] = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = SqueezeBertModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=A_ , dim=3_7 )
def __magic_name__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A_ )
@slow
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = SqueezeBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
_lowerCAmelCase : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
_lowerCAmelCase : List[str] = model(A_ )[0]
_lowerCAmelCase : Any = torch.Size((1, 3) )
self.assertEqual(output.shape , A_ )
_lowerCAmelCase : Any = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-4 ) )
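# --- Sketch of the random-input convention the tester above builds on (a
# hypothetical standalone stand-in for the ids_tensor helper):
import torch

def _random_ids(shape, vocab_size):
    return torch.randint(0, vocab_size, shape, dtype=torch.long)

_batch = _random_ids((13, 7), 99)  # the tester's default batch_size, seq_length, vocab_size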
| 503 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
a : Union[str, Any] ="WhisperFeatureExtractor"
a : List[str] ="WhisperTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__(lowercase_ , lowercase_ )
lowerCAmelCase : Any = self.feature_extractor
lowerCAmelCase : Optional[Any] = False
def lowercase__ ( self , snake_case__=None , snake_case__=None , snake_case__=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=lowercase_ , language=lowercase_ , no_timestamps=lowercase_ )
def __call__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowercase_ , **lowercase_ )
lowerCAmelCase : Optional[int] = kwargs.pop("audio" , lowercase_ )
lowerCAmelCase : List[Any] = kwargs.pop("sampling_rate" , lowercase_ )
lowerCAmelCase : Optional[int] = kwargs.pop("text" , lowercase_ )
if len(lowercase_ ) > 0:
lowerCAmelCase : Dict = args[0]
lowerCAmelCase : int = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCAmelCase : Tuple = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
if text is not None:
lowerCAmelCase : Optional[Any] = self.tokenizer(lowercase_ , **lowercase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase : Dict = encodings["input_ids"]
return inputs
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
def lowercase__ ( self , snake_case__ , snake_case__="np" ):
"""simple docstring"""
return self.tokenizer.get_prompt_ids(lowercase_ , return_tensors=lowercase_ )
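# --- Usage sketch (hedged; it needs the openai/whisper-tiny checkpoint to be
# reachable, so it is left commented out here):
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="hello world").input_ids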
| 645 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ : int = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
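# --- A hypothetical minimal version of the shift_tokens_right helper imported above,
# for intuition: the decoder input is the target sequence shifted one step right,
# with the decoder start token prepended (uses the numpy import at the top of this file).
def _shift_right_demo(ids, pad_token_id, decoder_start_token_id):
    shifted = np.full_like(ids, pad_token_id)
    shifted[:, 1:] = ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted

assert (_shift_right_demo(np.array([[5, 6, 2]]), 1, 0) == np.array([[0, 5, 6]])).all()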
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Keep ids >= 3 and force an EOS (id 2) as the last token of every row.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        # Pad the decoder attention mask out to the full cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 313 | 0 |
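# Backtracking enumeration of all r-sized combinations of an array: each
# element is either placed at data[index] or skipped, recursing on i + 1.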
def combination_util(arr, n, r, index, data, i):
    # A combination of size r is ready to be printed.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
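# For comparison, the standard library yields the same r-sized subsets
# directly; this small helper is illustrative and not part of the original.
from itertools import combinations


def print_combination_stdlib(arr: list, r: int) -> None:
    for combo in combinations(arr, r):
        print(*combo)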
| 571 |
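# ViLT processor: wraps a BERT tokenizer and a ViLT image processor so one
# call tokenizes the text, extracts pixel values, and merges both outputs.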
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 571 | 1 |
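# Shared test harness for transformers config classes: checks common
# properties and round-trips a config through JSON and save_pretrained.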
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 49 |
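# Nearest-neighbour image resize: destination pixel (i, j) copies the source
# pixel at (int(ratio_y * i), int(ratio_x * j)).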
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # For every destination pixel, copy the nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column back to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row back to the nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
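# A vectorized sketch of the same nearest-neighbour mapping using numpy fancy
# indexing; the helper below is illustrative and assumes an H x W x 3 image.
def resize_nearest(img, dst_w: int, dst_h: int):
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)
    return img[ys][:, xs]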
| 92 | 0 |
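# TF VisionTextDualEncoder tests: a shared mixin exercises construction from
# configs or pretrained parts, save/load, and attention shapes, then is
# specialized for ViT+BERT, DeiT+RoBERTa, and CLIP+BERT pairs.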
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 198 |
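# fairseq -> transformers wav2vec2 conversion: walk the fairseq state dict,
# rename parameters via MAPPING, and copy tensors into the Hugging Face
# model, with special handling for conv feature-extractor and adapter params.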
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    # Map each non-empty line number to the first whitespace-separated token.
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
a__ = parser.parse_args()
a__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 198 | 1 |
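# Arithmetic mean of a list of numbers, verified through doctests.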
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 341 |
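# DeiT model configuration plus its ONNX export description (input axes and
# validation tolerance).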
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 18 | 0 |
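# TPU launch helper: imports the training script as a module and spawns it
# across TPU cores with torch_xla's xmp.spawn, forwarding CLI arguments.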
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 700 |
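# Export BART + beam search to ONNX: script the generator with TorchScript,
# run torch.onnx.export, deduplicate initializers, then cross-check ONNX
# Runtime output against model.generate.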
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
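
# Hedged sketch: the same export can be driven programmatically instead of via the
# CLI; the model id and output path below are examples, not requirements of the script.
def export_bart_base_example():
    device = torch.device("cpu")
    model, tokenizer = load_model_tokenizer("facebook/bart-base", device)
    export_and_validate_model(model, tokenizer, "BART.onnx", num_beams=4, max_length=5)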
| 19 | 0 |
import os


def solution():
    """Project Euler 22: sum the alphabetical-value scores of the sorted names."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            # ord("A") is 65, so subtracting 64 maps A..Z onto 1..26.
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
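
# Sanity check from the problem statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53,
# and as the 938th name contributes 938 * 53 = 49714 to the total.
def _colin_example():
    name_score = sum(ord(letter) - 64 for letter in "COLIN")
    assert name_score == 53
    assert 938 * name_score == 49714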
| 16 |
"""A pipeline that runs one prompt through four Stable Diffusion v1.x checkpoints for side-by-side comparison."""
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        # Pipelines for v1-1 through v1-3 are loaded from their checkpoints; the v1-4
        # pipeline is assembled from the components handed to this constructor.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> dict:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
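
# Hedged driver sketch: build the comparison pipeline from the components of the
# v1-4 checkpoint (loading four full checkpoints needs considerable memory).
def build_comparison_pipeline():
    base = StableDiffusionPipeline.from_pretrained(pipe4_model_id)
    return StableDiffusionComparisonPipeline(
        vae=base.vae,
        text_encoder=base.text_encoder,
        tokenizer=base.tokenizer,
        unet=base.unet,
        scheduler=base.scheduler,
        safety_checker=base.safety_checker,
        feature_extractor=base.feature_extractor,
    )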
| 596 | 0 |
"""Convert a T5X checkpoint (including UMT5 with scalable attention) into a PyTorch checkpoint."""
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameter of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    # Target key names below follow the Hugging Face T5/UMT5 state-dict layout.
    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
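
# Hedged example: the conversion can also be invoked from Python directly
# (all paths below are placeholders, not real checkpoints).
def convert_umt5_example():
    convert_t5x_checkpoint_to_pytorch(
        t5x_checkpoint_path="/path/to/t5x/checkpoint_1000000",
        config_file="/path/to/config.json",
        pytorch_dump_path="/path/to/pytorch_dump",
        is_encoder_only=False,
        scalable_attention=True,  # UMT5 checkpoints use per-layer relative position biases
    )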
| 658 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12,
                 image_size=224, patch_size=16, num_channels=3, use_mask_token=False,
                 use_absolute_position_embeddings=False, use_relative_position_bias=False,
                 use_shared_relative_position_bias=False, layer_scale_init_value=0.1,
                 drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11],
                 pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4,
                 auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
                 semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
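
# Usage sketch: pair the model config with its ONNX export description; in an
# installed transformers these classes are importable under the same names.
def _data2vec_onnx_example():
    config = Data2VecVisionConfig(image_size=224, patch_size=16)
    onnx_config = Data2VecVisionOnnxConfig(config)
    return onnx_config.inputs, onnx_config.atol_for_validation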
| 658 | 1 |