| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
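The rows below render the `code` column of successive dataset rows, separated by each row's integer columns. As a minimal, hypothetical sketch (this page does not name the dataset, so the identifier below is a placeholder), such a dataset loads with the `datasets` library:

```python
from datasets import load_dataset

# Placeholder identifier -- substitute the dataset's actual hub path.
dataset = load_dataset("username/code-style-pairs", split="train")

row = dataset[0]
print(row["label"])       # 0 or 1
print(row["code"][:200])  # first 200 characters of the code sample
```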
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
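For illustration, the conversion entry point above can also be exercised programmatically rather than through the CLI; a minimal sketch with placeholder file names:

```python
import json

import torch

# Placeholder paths -- substitute a real LDM checkpoint and its architecture config.
state_dict = torch.load("model.ckpt", map_location="cpu")
with open("config.json") as f:
    unet_config = json.loads(f.read())

converted = convert_ldm_checkpoint(state_dict, unet_config)
print(f"Converted {len(converted)} tensors to diffusers naming.")
```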
| 705
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
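Once the script has run, the dump folder should load like any other Transformers checkpoint; a minimal sketch with a placeholder path:

```python
from transformers import M2M100ForConditionalGeneration

# "m2m100_dump" is a placeholder for the folder passed as pytorch_dump_folder_path.
model = M2M100ForConditionalGeneration.from_pretrained("m2m100_dump")
print(model.config.vocab_size)
```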
| 35
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process.
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = "bert-base-cased" , __UpperCamelCase = 320 , __UpperCamelCase = 160 , )-> Dict:
UpperCamelCase = AutoTokenizer.from_pretrained(__UpperCamelCase )
UpperCamelCase = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F"train[:{n_train}]", """validation""": F"validation[:{n_val}]"} )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
UpperCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 706
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on a fixture image of cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 35
| 0
|
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
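As a quick cross-check, both implementations agree with `math.gcd` from the standard library; a minimal sketch:

```python
import math
from random import randint

for _ in range(100):
    a, b = randint(0, 1000), randint(1, 1000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)
```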
| 707
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
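A hedged usage sketch: assuming this class is available as the `stable_diffusion_comparison` community pipeline, it can be loaded through `DiffusionPipeline.from_pretrained` with `custom_pipeline` (argument values here are illustrative):

```python
import torch

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
)
pipe.enable_attention_slicing()

# Returns one image per v1.x checkpoint for side-by-side comparison.
images = pipe(prompt="an astronaut riding a horse on mars").images
```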
| 35
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/realm-cc-news-pretrained-embedder': 512,
    'google/realm-cc-news-pretrained-encoder': 512,
    'google/realm-cc-news-pretrained-scorer': 512,
    'google/realm-cc-news-pretrained-openqa': 512,
    'google/realm-orqa-nq-openqa': 512,
    'google/realm-orqa-nq-reader': 512,
    'google/realm-orqa-wq-openqa': 512,
    'google/realm-orqa-wq-reader': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad candidates to max_length so they stack into (batch_size, num_candidates, seq_len).
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
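A minimal usage sketch for `batch_encode_candidates`: each example carries several candidate strings, and because padding is forced to `max_length`, the results stack into tensors of shape `(batch_size, num_candidates, max_length)`:

```python
tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")

# Two examples with two candidate strings each.
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])
```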
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 709
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
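As a worked example of the ideal gas law PV = nRT that these helpers implement: one mole of an ideal gas at 273.15 K confined to 0.0224 m³ should sit near one standard atmosphere:

```python
pressure = pressure_of_gas_system(1.0, 273.15, 0.0224)
print(f"{pressure:.0f} Pa")  # ~101388 Pa, close to 1 atm (101325 Pa)
```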
| 35
| 0
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 710
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = 16 )-> Dict:
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
__UpperCamelCase , padding="""longest""" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
UpperCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
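# Illustration of the `pad_to_multiple_of` rounding used in `collate_fn` above.
# `_pad_to_multiple` is a hypothetical helper that mirrors the rounding
# `tokenizer.pad` applies: a longest-sequence length of 57 with a multiple of 8
# is padded up to 64, which keeps tensor shapes friendly for fp16/bf16 kernels.
def _pad_to_multiple(length: int, multiple: int) -> int:
    return ((length + multiple - 1) // multiple) * multiple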
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCamelCase ) == "1":
UpperCamelCase = 2
# New Code #
UpperCamelCase = int(args.gradient_accumulation_steps )
UpperCamelCase = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config["""lr"""]
UpperCamelCase = int(config["""num_epochs"""] )
UpperCamelCase = int(config["""seed"""] )
UpperCamelCase = int(config["""batch_size"""] )
UpperCamelCase = evaluate.load("""glue""" , """mrpc""" )
set_seed(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=__UpperCamelCase , model=__UpperCamelCase , local_sgd_steps=__UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
UpperCamelCase = model(**__UpperCamelCase )
UpperCamelCase = output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**__UpperCamelCase )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase ,UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def lowercase__ ( )-> List[Any]:
UpperCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__UpperCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=__UpperCamelCase , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SCREAMING_SNAKE_CASE__ = get_tests_dir('fixtures')
class a_ ( unittest.TestCase ):
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
UpperCamelCase = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check ensures that the fake head request was actually called
mock_head.assert_called()
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# the config is in a subfolder; the following should not work without specifying the subfolder
UpperCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
UpperCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@is_staging_test
class a_ ( unittest.TestCase ):
@classmethod
def A__ ( cls ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def A__ ( cls ) -> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
UpperCamelCase = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id="""test-image-processor""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
UpperCamelCase = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
UpperCamelCase = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
UpperCamelCase = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
UpperCamelCase = CustomImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
UpperCamelCase = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor" , trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = patch_norm
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = is_training
UpperCamelCase = scope
UpperCamelCase = use_labels
UpperCamelCase = type_sequence_label_size
UpperCamelCase = encoder_stride
UpperCamelCase = out_features
UpperCamelCase = out_indices
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = ["""stem"""]
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> int:
"""simple docstring"""
return
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip("""Swin does not use inputs_embeds""" )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has"
F" `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
@require_torch
class a_ ( unittest.TestCase , lowerCamelCase ):
lowercase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase = MaskFormerSwinConfig
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCamelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
'''simple docstring'''
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def lowercase__ ( __UpperCamelCase=None )-> str:
if subparsers is not None:
UpperCamelCase = subparsers.add_parser("""tpu-config""" , description=_description )
else:
UpperCamelCase = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
UpperCamelCase = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=__UpperCamelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=__UpperCamelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
UpperCamelCase = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=__UpperCamelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def lowercase__ ( __UpperCamelCase )-> str:
UpperCamelCase = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__UpperCamelCase ):
UpperCamelCase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase = defaults.commands
if not args.tpu_name:
UpperCamelCase = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
UpperCamelCase = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , __UpperCamelCase ):
UpperCamelCase = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
UpperCamelCase = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __UpperCamelCase ):
UpperCamelCase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
UpperCamelCase = """; """.join(__UpperCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(__UpperCamelCase )}" )
return
subprocess.run(__UpperCamelCase )
print("""Successfully setup pod.""" )
def lowercase__ ( )-> Dict:
UpperCamelCase = tpu_command_parser()
UpperCamelCase = parser.parse_args()
tpu_command_launcher(__UpperCamelCase )
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( __UpperCamelCase )-> str:
return EnvironmentCommand(None)
def lowercase__ ( __UpperCamelCase )-> str:
return EnvironmentCommand(args.accelerate_config_file )
class a_ ( lowerCamelCase ):
@staticmethod
def A__ ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = parser.add_parser("""env""" )
download_parser.add_argument(
"""--accelerate-config_file""" , default=_SCREAMING_SNAKE_CASE , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase = accelerate_config_file
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = """not installed"""
if is_safetensors_available():
import safetensors
UpperCamelCase = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
UpperCamelCase = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
UpperCamelCase = """not installed"""
UpperCamelCase = UpperCamelCase = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
UpperCamelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
UpperCamelCase = (
"""\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else F"\t{accelerate_config}"
)
UpperCamelCase = """not installed"""
UpperCamelCase = """NA"""
if is_torch_available():
import torch
UpperCamelCase = torch.__version__
UpperCamelCase = torch.cuda.is_available()
UpperCamelCase = """not installed"""
UpperCamelCase = """NA"""
if is_tf_available():
import tensorflow as tf
UpperCamelCase = tf.__version__
try:
# deprecated in v2.1
UpperCamelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
UpperCamelCase = bool(tf.config.list_physical_devices("""GPU""" ) )
UpperCamelCase = """not installed"""
UpperCamelCase = """not installed"""
UpperCamelCase = """not installed"""
UpperCamelCase = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
UpperCamelCase = flax.__version__
UpperCamelCase = jax.__version__
UpperCamelCase = jaxlib.__version__
UpperCamelCase = jax.lib.xla_bridge.get_backend().platform
UpperCamelCase = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F"{safetensors_version}",
"""Accelerate version""": F"{accelerate_version}",
"""Accelerate config""": F"{accelerate_config_str}",
"""PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
"""Tensorflow version (GPU?)""": F"{tf_version} ({tf_cuda_available})",
"""Flax version (CPU?/GPU?/TPU?)""": F"{flax_version} ({jax_backend})",
"""Jax version""": F"{jax_version}",
"""JaxLib version""": F"{jaxlib_version}",
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(_SCREAMING_SNAKE_CASE ) )
return info
@staticmethod
def A__ ( _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
for attribute in key.split(""".""" ):
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(__UpperCamelCase )[0].split(""".""" )[-2]
UpperCamelCase = mapped_key.replace("""*""" , __UpperCamelCase )
if "weight_g" in name:
UpperCamelCase = """weight_g"""
elif "weight_v" in name:
UpperCamelCase = """weight_v"""
elif "weight" in name:
UpperCamelCase = """weight"""
elif "bias" in name:
UpperCamelCase = """bias"""
else:
UpperCamelCase = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCamelCase = full_name.split("""conv_layers.""" )[-1]
UpperCamelCase = name.split(""".""" )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCamelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCamelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCamelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCamelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__UpperCamelCase )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCamelCase = SEWConfig()
if is_finetuned:
UpperCamelCase = model.wav_encoder.wav_model.cfg
else:
UpperCamelCase = model.cfg
UpperCamelCase = fs_config.conv_bias
UpperCamelCase = eval(fs_config.conv_feature_layers )
UpperCamelCase = [x[0] for x in conv_layers]
UpperCamelCase = [x[1] for x in conv_layers]
UpperCamelCase = [x[2] for x in conv_layers]
UpperCamelCase = """gelu"""
UpperCamelCase = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
UpperCamelCase = 0.0
UpperCamelCase = fs_config.activation_fn.name
UpperCamelCase = fs_config.encoder_embed_dim
UpperCamelCase = 0.02
UpperCamelCase = fs_config.encoder_ffn_embed_dim
UpperCamelCase = 1E-5
UpperCamelCase = fs_config.encoder_layerdrop
UpperCamelCase = fs_config.encoder_attention_heads
UpperCamelCase = fs_config.conv_pos_groups
UpperCamelCase = fs_config.conv_pos
UpperCamelCase = len(__UpperCamelCase )
UpperCamelCase = fs_config.encoder_layers
UpperCamelCase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCamelCase = model.cfg
UpperCamelCase = fs_config.final_dropout
UpperCamelCase = fs_config.layerdrop
UpperCamelCase = fs_config.activation_dropout
UpperCamelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCamelCase = fs_config.attention_dropout
UpperCamelCase = fs_config.dropout_input
UpperCamelCase = fs_config.dropout
UpperCamelCase = fs_config.mask_channel_length
UpperCamelCase = fs_config.mask_channel_prob
UpperCamelCase = fs_config.mask_length
UpperCamelCase = fs_config.mask_prob
UpperCamelCase = """Wav2Vec2FeatureExtractor"""
UpperCamelCase = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True )-> List[str]:
if is_finetuned:
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
UpperCamelCase = SEWConfig.from_pretrained(__UpperCamelCase )
else:
UpperCamelCase = convert_config(model[0] , __UpperCamelCase )
UpperCamelCase = model[0].eval()
UpperCamelCase = True if config.feat_extract_norm == """layer""" else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(__UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(__UpperCamelCase , """vocab.json""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCamelCase ) )
return
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __UpperCamelCase )
UpperCamelCase = WavaVecaCTCTokenizer(
__UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCamelCase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
UpperCamelCase = SEWForCTC(__UpperCamelCase )
else:
UpperCamelCase = SEWModel(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
'''simple docstring'''
from math import factorial
def lowercase__ ( __UpperCamelCase = 20 )-> int:
UpperCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
UpperCamelCase = n // 2
return int(factorial(__UpperCamelCase ) / (factorial(__UpperCamelCase ) * factorial(n - k )) )
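# Equivalent closed form: the value above is the central binomial coefficient
# C(2n, n). `solution_via_comb` is an illustrative cross-check, not used
# elsewhere; `math.comb` requires Python >= 3.8.
def solution_via_comb(n: int = 20) -> int:
    from math import comb
    return comb(2 * n, n)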
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def lowercase__ ( __UpperCamelCase )-> Optional[Any]:
def remove_articles(__UpperCamelCase ):
UpperCamelCase = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(__UpperCamelCase , """ """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase ):
UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
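# Example of the normalization above (hypothetical input):
#   normalize_answer("The  Cat, sat!") -> "cat sat"
# (lowercased, punctuation stripped, articles removed, whitespace collapsed)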
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
return int(normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCamelCase = [any(compute_exact(__UpperCamelCase , __UpperCamelCase ) for ref in refs ) for pred, refs in zip(__UpperCamelCase , __UpperCamelCase )]
return (sum(__UpperCamelCase ) / len(__UpperCamelCase )) * 100
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCamelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
UpperCamelCase = Counter(__UpperCamelCase )
UpperCamelCase = Counter(__UpperCamelCase )
UpperCamelCase = Counter()
for sgram, scount in sgramcounter.items():
UpperCamelCase = scount * numref
UpperCamelCase = Counter(__UpperCamelCase )
UpperCamelCase = Counter()
for cgram, ccount in cgramcounter.items():
UpperCamelCase = ccount * numref
# KEEP
UpperCamelCase = sgramcounter_rep & cgramcounter_rep
UpperCamelCase = keepgramcounter_rep & rgramcounter
UpperCamelCase = sgramcounter_rep & rgramcounter
UpperCamelCase = 0
UpperCamelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCamelCase = 1
UpperCamelCase = 1
if len(__UpperCamelCase ) > 0:
UpperCamelCase = keeptmpscorea / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
UpperCamelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
UpperCamelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
UpperCamelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
UpperCamelCase = sgramcounter_rep - cgramcounter_rep
UpperCamelCase = delgramcounter_rep - rgramcounter
UpperCamelCase = sgramcounter_rep - rgramcounter
UpperCamelCase = 0
UpperCamelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCamelCase = 1
if len(__UpperCamelCase ) > 0:
UpperCamelCase = deltmpscorea / len(__UpperCamelCase )
# ADDITION
UpperCamelCase = set(__UpperCamelCase ) - set(__UpperCamelCase )
UpperCamelCase = set(__UpperCamelCase ) & set(__UpperCamelCase )
UpperCamelCase = set(__UpperCamelCase ) - set(__UpperCamelCase )
UpperCamelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCamelCase = 1
UpperCamelCase = 1
if len(__UpperCamelCase ) > 0:
UpperCamelCase = addtmpscore / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
UpperCamelCase = addtmpscore / len(__UpperCamelCase )
UpperCamelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
UpperCamelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
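# Note on the triple returned above: the keep and add components are F1 scores
# while the deletion component is precision only, following the SARI definition
# of Xu et al. (2016): over-deleting is penalized, but deletions have no recall
# target.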
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCamelCase = len(__UpperCamelCase )
UpperCamelCase = ssent.split(""" """ )
UpperCamelCase = csent.split(""" """ )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for rsent in rsents:
UpperCamelCase = rsent.split(""" """ )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
UpperCamelCase = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
UpperCamelCase = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
UpperCamelCase = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
UpperCamelCase = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
UpperCamelCase = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
UpperCamelCase = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
UpperCamelCase = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
UpperCamelCase = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
UpperCamelCase = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(__UpperCamelCase )
((UpperCamelCase) ,(UpperCamelCase) ,(UpperCamelCase)) = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((UpperCamelCase) ,(UpperCamelCase) ,(UpperCamelCase)) = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((UpperCamelCase) ,(UpperCamelCase) ,(UpperCamelCase)) = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((UpperCamelCase) ,(UpperCamelCase) ,(UpperCamelCase)) = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
UpperCamelCase = sum([delascore, delascore, delascore, delascore] ) / 4
UpperCamelCase = sum([addascore, addascore, addascore, addascore] ) / 4
UpperCamelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = "13a" , __UpperCamelCase = True )-> Union[str, Any]:
# Normalization is required for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using spaces
# to split the sentence. Even though the Wiki-Auto and TURK datasets
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
UpperCamelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
UpperCamelCase = sacrebleu.metrics.bleu._get_tokenizer(__UpperCamelCase )()(__UpperCamelCase )
else:
UpperCamelCase = sacrebleu.TOKENIZERS[tokenizer]()(__UpperCamelCase )
elif tokenizer == "moses":
UpperCamelCase = sacremoses.MosesTokenizer().tokenize(__UpperCamelCase , return_str=__UpperCamelCase , escape=__UpperCamelCase )
elif tokenizer == "penn":
UpperCamelCase = sacremoses.MosesTokenizer().penn_tokenize(__UpperCamelCase , return_str=__UpperCamelCase )
else:
UpperCamelCase = sentence
if not return_str:
UpperCamelCase = normalized_sent.split()
return normalized_sent
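# A minimal sketch of the expected behavior (assuming sacrebleu's default "13a"
# tokenizer): normalize("About 95 species are currently accepted.") would yield
# "about 95 species are currently accepted ." -- lowercased, with punctuation
# split off so the SARI n-gram routines above can split on single spaces.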
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
if not (len(__UpperCamelCase ) == len(__UpperCamelCase ) == len(__UpperCamelCase )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
UpperCamelCase = 0
for src, pred, refs in zip(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
sari_score += SARIsent(normalize(__UpperCamelCase ) , normalize(__UpperCamelCase ) , [normalize(__UpperCamelCase ) for sent in refs] )
UpperCamelCase = sari_score / len(__UpperCamelCase )
return 100 * sari_score
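# Hypothetical usage sketch: each sentence-level SARI is in [0, 1], so the corpus
# score returned here is the mean over sentences scaled to 0-100, e.g.
# compute_sari(sources=[src], predictions=[pred], references=[[ref1, ref2]]).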
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="exp" , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , )-> List[Any]:
UpperCamelCase = len(references[0] )
if any(len(__UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
UpperCamelCase = [[refs[i] for refs in references] for i in range(__UpperCamelCase )]
UpperCamelCase = sacrebleu.corpus_bleu(
__UpperCamelCase , __UpperCamelCase , smooth_method=__UpperCamelCase , smooth_value=__UpperCamelCase , force=__UpperCamelCase , lowercase=__UpperCamelCase , use_effective_order=__UpperCamelCase , )
return output.score
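# Note on the reference layout above: the incoming shape is one list of references
# per prediction, while sacrebleu.corpus_bleu expects one list per reference "stream".
# E.g. references=[["r1a", "r1b"], ["r2a", "r2b"]] is transposed to
# [["r1a", "r2a"], ["r1b", "r2b"]] before the call.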
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = {}
result.update({"""sari""": compute_sari(sources=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )} )
result.update({"""sacrebleu""": compute_sacrebleu(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )} )
result.update({"""exact""": compute_em(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )} )
return result
| 714
|
'''simple docstring'''
from math import sqrt
def lowercase__ ( __UpperCamelCase )-> int:
UpperCamelCase = 0
for i in range(1 , int(sqrt(__UpperCamelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(__UpperCamelCase ):
total += i + n // i
elif i == sqrt(__UpperCamelCase ):
total += i
return total - n
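# Sanity check (a well-known amicable pair): the proper divisors of 220 sum to 284
# and those of 284 sum back to 220, so both numbers are counted by the solver below.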
def lowercase__ ( __UpperCamelCase = 10000 )-> int:
UpperCamelCase = sum(
i
for i in range(1 , __UpperCamelCase )
if sum_of_divisors(sum_of_divisors(__UpperCamelCase ) ) == i and sum_of_divisors(__UpperCamelCase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 35
| 0
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class a_ ( unittest.TestCase ):
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = [0]
UpperCamelCase = [0]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 0 )
UpperCamelCase = [60]
UpperCamelCase = [10]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 0 )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = 3
UpperCamelCase = [1, 2, 3]
UpperCamelCase = [3, 2, 1]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 5 )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = 50
UpperCamelCase = [60, 100, 120]
UpperCamelCase = [10, 20, 30]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 220 )
if __name__ == "__main__":
unittest.main()
| 715
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35
| 0
|
'''simple docstring'''
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
SCREAMING_SNAKE_CASE__ = 'facebook/wmt19-en-de'
SCREAMING_SNAKE_CASE__ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
SCREAMING_SNAKE_CASE__ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
SCREAMING_SNAKE_CASE__ = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
SCREAMING_SNAKE_CASE__ = tokenizer(['Making tiny model'], return_tensors='pt')
SCREAMING_SNAKE_CASE__ = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
SCREAMING_SNAKE_CASE__ = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 716
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase )-> str:
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
UpperCamelCase = """"""
while len(__UpperCamelCase ) % 3 != 0:
UpperCamelCase = """0""" + bin_string
UpperCamelCase = [
bin_string[index : index + 3]
for index in range(len(__UpperCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
UpperCamelCase = 0
for index, val in enumerate(__UpperCamelCase ):
oct_val += int(2 ** (2 - index) * int(__UpperCamelCase ) )
oct_string += str(__UpperCamelCase )
return oct_string
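# Worked example: "1111" is left-padded to "001111", split into ["001", "111"],
# and each 3-bit group maps to one octal digit, giving "17" (0b1111 == 0o17 == 15).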
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35
| 0
|
'''simple docstring'''
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class a_ ( enum.Enum ):
'''simple docstring'''
lowercase = """all_checks"""
lowercase = """basic_checks"""
lowercase = """no_checks"""
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None )-> int:
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
UpperCamelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCamelCase = """ for """ + verification_name if verification_name is not None else """"""
if len(__UpperCamelCase ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
class a_ ( lowerCamelCase ):
'''simple docstring'''
pass
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[str]:
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
if len(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) > 0:
raise UnexpectedSplits(str(set(__UpperCamelCase ) - set(__UpperCamelCase ) ) )
UpperCamelCase = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__UpperCamelCase ) > 0:
raise NonMatchingSplitsSizesError(str(__UpperCamelCase ) )
logger.info("""All the splits matched successfully.""" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = True )-> dict:
if record_checksum:
UpperCamelCase = shaaaa()
with open(__UpperCamelCase , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b"""""" ):
m.update(__UpperCamelCase )
UpperCamelCase = m.hexdigest()
else:
UpperCamelCase = None
return {"num_bytes": os.path.getsize(__UpperCamelCase ), "checksum": checksum}
def lowercase__ ( __UpperCamelCase )-> List[str]:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 717
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowercase__ ( __UpperCamelCase=None )-> Union[str, Any]:
if subparsers is not None:
UpperCamelCase = subparsers.add_parser("""env""" )
else:
UpperCamelCase = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=__UpperCamelCase , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def lowercase__ ( __UpperCamelCase )-> List[str]:
UpperCamelCase = torch.__version__
UpperCamelCase = torch.cuda.is_available()
UpperCamelCase = is_xpu_available()
UpperCamelCase = is_npu_available()
UpperCamelCase = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(__UpperCamelCase ):
UpperCamelCase = load_config_from_file(args.config_file ).to_dict()
UpperCamelCase = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
"""PyTorch XPU available""": str(__UpperCamelCase ),
"""PyTorch NPU available""": str(__UpperCamelCase ),
"""System RAM""": F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
}
if pt_cuda_available:
UpperCamelCase = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([F"- {prop}: {val}" for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
UpperCamelCase = (
"""\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(__UpperCamelCase , __UpperCamelCase )
else F"\t{accelerate_config}"
)
print(__UpperCamelCase )
UpperCamelCase = accelerate_config
return info
def lowercase__ ( )-> int:
UpperCamelCase = env_command_parser()
UpperCamelCase = parser.parse_args()
env_command(__UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 35
| 0
|
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
SCREAMING_SNAKE_CASE__ = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = 'hopper-medium-v2'
SCREAMING_SNAKE_CASE__ = gym.make(env_name)
SCREAMING_SNAKE_CASE__ = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE__ = env.reset()
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1_0_0_0
SCREAMING_SNAKE_CASE__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE__ = pipeline(obs, planning_horizon=3_2)
# execute action in environment
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = env.step(denorm_actions)
SCREAMING_SNAKE_CASE__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE__ = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
| 718
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not number >= 1:
raise ValueError("""starting number must be an integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
UpperCamelCase = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__UpperCamelCase )
# print(out)
number += 1
out += " "
return out
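# Worked example, assuming the function is exposed as fizz_buzz(number, iterations):
# fizz_buzz(1, 7) -> "1 2 Fizz 4 Buzz Fizz 7 " (note the trailing space added per step).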
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35
| 0
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def lowercase__ ( __UpperCamelCase )-> Dict:
UpperCamelCase = {}
UpperCamelCase = job["""started_at"""]
UpperCamelCase = job["""completed_at"""]
UpperCamelCase = date_parser.parse(__UpperCamelCase )
UpperCamelCase = date_parser.parse(__UpperCamelCase )
UpperCamelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
UpperCamelCase = start
UpperCamelCase = end
UpperCamelCase = duration_in_min
return job_info
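# Sketch with hypothetical timestamps: a job with started_at "2023-01-01T00:00:00Z"
# and completed_at "2023-01-01T00:12:00Z" yields a duration of 12 (minutes, rounded).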
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=None )-> Union[str, Any]:
UpperCamelCase = None
if token is not None:
UpperCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
UpperCamelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
UpperCamelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
UpperCamelCase = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(__UpperCamelCase ) for job in result["""jobs"""]} )
UpperCamelCase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__UpperCamelCase ):
UpperCamelCase = requests.get(url + F"&page={i + 2}" , headers=__UpperCamelCase ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(__UpperCamelCase ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = get_job_time(args.workflow_run_id)
SCREAMING_SNAKE_CASE__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'{k}: {v["duration"]}')
| 719
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=1 )-> Tuple:
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=0 )-> Dict:
UpperCamelCase = []
for old_item in old_list:
UpperCamelCase = old_item.replace("""in_layers.0""" , """norm1""" )
UpperCamelCase = new_item.replace("""in_layers.2""" , """conv1""" )
UpperCamelCase = new_item.replace("""out_layers.0""" , """norm2""" )
UpperCamelCase = new_item.replace("""out_layers.3""" , """conv2""" )
UpperCamelCase = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
UpperCamelCase = new_item.replace("""skip_connection""" , """conv_shortcut""" )
UpperCamelCase = shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=0 )-> List[str]:
UpperCamelCase = []
for old_item in old_list:
UpperCamelCase = old_item
UpperCamelCase = new_item.replace("""norm.weight""" , """group_norm.weight""" )
UpperCamelCase = new_item.replace("""norm.bias""" , """group_norm.bias""" )
UpperCamelCase = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
UpperCamelCase = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
UpperCamelCase = shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None )-> str:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCamelCase = old_checkpoint[path]
UpperCamelCase = old_tensor.shape[0] // 3
UpperCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCamelCase = old_tensor.shape[0] // config["""num_head_channels"""] // 3
UpperCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCamelCase = query.reshape(__UpperCamelCase )
UpperCamelCase = key.reshape(__UpperCamelCase )
UpperCamelCase = value.reshape(__UpperCamelCase )
for path in paths:
UpperCamelCase = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCamelCase = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
UpperCamelCase = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
UpperCamelCase = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCamelCase = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCamelCase = old_checkpoint[path["""old"""]][:, :, 0]
else:
UpperCamelCase = old_checkpoint[path["""old"""]]
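# The [:, :, 0] slice above drops the trailing kernel dimension: a 1x1 Conv1d weight
# of shape (out_channels, in_channels, 1) becomes the (out_channels, in_channels)
# matrix that nn.Linear expects, which is all "conv 1D to linear" amounts to here.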
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCamelCase = {}
UpperCamelCase = checkpoint["""time_embed.0.weight"""]
UpperCamelCase = checkpoint["""time_embed.0.bias"""]
UpperCamelCase = checkpoint["""time_embed.2.weight"""]
UpperCamelCase = checkpoint["""time_embed.2.bias"""]
UpperCamelCase = checkpoint["""input_blocks.0.0.weight"""]
UpperCamelCase = checkpoint["""input_blocks.0.0.bias"""]
UpperCamelCase = checkpoint["""out.0.weight"""]
UpperCamelCase = checkpoint["""out.0.bias"""]
UpperCamelCase = checkpoint["""out.2.weight"""]
UpperCamelCase = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
UpperCamelCase = (i - 1) // (config["""num_res_blocks"""] + 1)
UpperCamelCase = (i - 1) % (config["""num_res_blocks"""] + 1)
UpperCamelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
UpperCamelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
UpperCamelCase = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
UpperCamelCase = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
UpperCamelCase = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
UpperCamelCase = middle_blocks[0]
UpperCamelCase = middle_blocks[1]
UpperCamelCase = middle_blocks[2]
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
UpperCamelCase = i // (config["""num_res_blocks"""] + 1)
UpperCamelCase = i % (config["""num_res_blocks"""] + 1)
UpperCamelCase = [shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
UpperCamelCase = {}
for layer in output_block_layers:
UpperCamelCase ,UpperCamelCase = layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
UpperCamelCase = [layer_name]
if len(__UpperCamelCase ) > 1:
UpperCamelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
UpperCamelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCamelCase = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
UpperCamelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
UpperCamelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
UpperCamelCase = []
if len(__UpperCamelCase ):
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
UpperCamelCase = renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCamelCase = """.""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
UpperCamelCase = """.""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
UpperCamelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read())
SCREAMING_SNAKE_CASE__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE__ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 35
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = patch_norm
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = is_training
UpperCamelCase = scope
UpperCamelCase = use_labels
UpperCamelCase = type_sequence_label_size
UpperCamelCase = encoder_stride
UpperCamelCase = out_features
UpperCamelCase = out_indices
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = ["""stem"""]
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> int:
"""simple docstring"""
return
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip("""Swin does not use inputs_embeds""" )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has"
F" `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
@require_torch
class a_ ( unittest.TestCase , lowerCamelCase ):
lowercase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase = MaskFormerSwinConfig
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCamelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 720
|
'''simple docstring'''
from __future__ import annotations
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35
| 0
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
SCREAMING_SNAKE_CASE__ = 'src/diffusers'
SCREAMING_SNAKE_CASE__ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
SCREAMING_SNAKE_CASE__ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
SCREAMING_SNAKE_CASE__ = spec.loader.load_module()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
return line.startswith(__UpperCamelCase ) or len(__UpperCamelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , __UpperCamelCase ) is not None
def lowercase__ ( __UpperCamelCase )-> Optional[Any]:
UpperCamelCase = object_name.split(""".""" )
UpperCamelCase = 0
# First let's find the module where our object lives.
UpperCamelCase = parts[i]
while i < len(__UpperCamelCase ) and not os.path.isfile(os.path.join(__UpperCamelCase , F"{module}.py" ) ):
i += 1
if i < len(__UpperCamelCase ):
UpperCamelCase = os.path.join(__UpperCamelCase , parts[i] )
if i >= len(__UpperCamelCase ):
raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(__UpperCamelCase , F"{module}.py" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase = f.readlines()
# Now let's find the class / func in the code!
UpperCamelCase = """"""
UpperCamelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(__UpperCamelCase ) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__UpperCamelCase ):
raise ValueError(F" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCamelCase = line_index
while line_index < len(__UpperCamelCase ) and _should_continue(lines[line_index] , __UpperCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCamelCase = lines[start_index:line_index]
return "".join(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
SCREAMING_SNAKE_CASE__ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
SCREAMING_SNAKE_CASE__ = re.compile(R'<FILL\s+[^>]*>')
def lowercase__ ( __UpperCamelCase )-> Dict:
UpperCamelCase = code.split("""\n""" )
UpperCamelCase = 0
while idx < len(__UpperCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__UpperCamelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def lowercase__ ( __UpperCamelCase )-> Dict:
UpperCamelCase = len(get_indent(__UpperCamelCase ) ) > 0
if has_indent:
UpperCamelCase = F"class Bla:\n{code}"
UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=__UpperCamelCase )
UpperCamelCase = black.format_str(__UpperCamelCase , mode=__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = style_docstrings_in_code(__UpperCamelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=False )-> Tuple:
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase = f.readlines()
UpperCamelCase = []
UpperCamelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__UpperCamelCase ):
UpperCamelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = search.groups()
UpperCamelCase = find_code_in_diffusers(__UpperCamelCase )
UpperCamelCase = get_indent(__UpperCamelCase )
UpperCamelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCamelCase = theoretical_indent
UpperCamelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCamelCase = True
while line_index < len(__UpperCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__UpperCamelCase ):
break
UpperCamelCase = lines[line_index]
UpperCamelCase = _should_continue(__UpperCamelCase , __UpperCamelCase ) and re.search(F"^{indent}# End copy" , __UpperCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCamelCase = lines[start_index:line_index]
UpperCamelCase = """""".join(__UpperCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCamelCase = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(__UpperCamelCase ) is None]
UpperCamelCase = """\n""".join(__UpperCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__UpperCamelCase ) > 0:
UpperCamelCase = replace_pattern.replace("""with""" , """""" ).split(""",""" )
UpperCamelCase = [_re_replace_pattern.search(__UpperCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = pattern.groups()
UpperCamelCase = re.sub(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if option.strip() == "all-casing":
UpperCamelCase = re.sub(obja.lower() , obja.lower() , __UpperCamelCase )
UpperCamelCase = re.sub(obja.upper() , obja.upper() , __UpperCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCamelCase = blackify(lines[start_index - 1] + theoretical_code )
UpperCamelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCamelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCamelCase = start_index + 1
if overwrite and len(__UpperCamelCase ) > 0:
# Warn the user a file has been modified.
print(F"Detected changes, rewriting {filename}." )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
return diffs
def lowercase__ ( __UpperCamelCase = False )-> Any:
UpperCamelCase = glob.glob(os.path.join(__UpperCamelCase , """**/*.py""" ) , recursive=__UpperCamelCase )
UpperCamelCase = []
for filename in all_files:
UpperCamelCase = is_copy_consistent(__UpperCamelCase , __UpperCamelCase )
diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(__UpperCamelCase ) > 0:
UpperCamelCase = """\n""".join(__UpperCamelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 721
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeps the
# full vocab and merges file, and thus results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
SCREAMING_SNAKE_CASE__ = 'facebook/wmt19-en-de'
SCREAMING_SNAKE_CASE__ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
SCREAMING_SNAKE_CASE__ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
SCREAMING_SNAKE_CASE__ = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
SCREAMING_SNAKE_CASE__ = tokenizer(['Making tiny model'], return_tensors='pt')
SCREAMING_SNAKE_CASE__ = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
SCREAMING_SNAKE_CASE__ = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 35
| 0
|
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def lowercase__ ( __UpperCamelCase )-> Optional[int]:
    # get the image dimensions (number of pixel rows and columns)
UpperCamelCase ,UpperCamelCase = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
UpperCamelCase = [255, 255, 255] - img[i][j]
return img
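# A vectorized equivalent of the loop above (a sketch, assuming `img` is the
# uint8 NumPy array returned by `imread`): NumPy broadcasting negates every
# channel of every pixel in one operation, avoiding the Python-level loop.
import numpy as np
def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img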
if __name__ == "__main__":
# read original image
__a = imread('image_data/lena.jpg', 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 700
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
    SCREAMING_SNAKE_CASE__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor()
SCREAMING_SNAKE_CASE__ = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
SCREAMING_SNAKE_CASE__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 35
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
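# Conceptually, LocalSGD lets every worker take K purely local optimizer
# steps and then averages the model parameters across workers. A minimal
# sketch of that synchronization step (illustrative only; assumes a
# torch.distributed process group is already initialized):
import torch.distributed as dist
def _average_parameters(model):
    world_size = dist.get_world_size()
    for param in model.parameters():
        dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
        param.data /= world_size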
SCREAMING_SNAKE_CASE__ = 1_6
SCREAMING_SNAKE_CASE__ = 3_2
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = 16 )-> Dict:
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
__UpperCamelCase , padding="""longest""" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
UpperCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
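# For example, with pad_to_multiple_of=8 a batch whose longest sequence has
# 61 tokens is padded up to 64, so tensor shapes align with the tile sizes
# Tensor Cores expect under mixed precision (illustrative numbers):
#
#   >>> length, multiple = 61, 8
#   >>> -(-length // multiple) * multiple  # ceiling division
#   64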
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
SCREAMING_SNAKE_CASE__ = mocked_dataloaders # noqa: F811
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCamelCase ) == "1":
UpperCamelCase = 2
# New Code #
UpperCamelCase = int(args.gradient_accumulation_steps )
UpperCamelCase = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config["""lr"""]
UpperCamelCase = int(config["""num_epochs"""] )
UpperCamelCase = int(config["""seed"""] )
UpperCamelCase = int(config["""batch_size"""] )
UpperCamelCase = evaluate.load("""glue""" , """mrpc""" )
set_seed(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=__UpperCamelCase , model=__UpperCamelCase , local_sgd_steps=__UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We currently do not support TPUs, nor do we advise using them here, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
UpperCamelCase = model(**__UpperCamelCase )
UpperCamelCase = output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**__UpperCamelCase )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase ,UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def lowercase__ ( )-> List[Any]:
UpperCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__UpperCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=__UpperCamelCase , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 701
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE = 6 ) -> None:
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = None
self.create_linked_list(_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase = Node()
UpperCamelCase = current_node
UpperCamelCase = current_node
UpperCamelCase = current_node
for _ in range(1 , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = Node()
UpperCamelCase = current_node
UpperCamelCase = previous_node
UpperCamelCase = current_node
UpperCamelCase = self.front
UpperCamelCase = previous_node
def A__ ( self ) -> bool:
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A__ ( self ) -> Any | None:
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCamelCase = self.rear.next
if self.rear:
UpperCamelCase = data
def A__ ( self ) -> Any:
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCamelCase = self.front.data
UpperCamelCase = None
return data
UpperCamelCase = self.front
UpperCamelCase = old_front.next
UpperCamelCase = old_front.data
UpperCamelCase = None
return data
def A__ ( self ) -> None:
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""" )
def A__ ( self ) -> None:
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class a_ :
def __init__( self ) -> None:
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
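# Usage sketch for the circular queue above, assuming the original
# (pre-obfuscation) class and method names -- the names below are
# hypothetical and shown for illustration only:
#
#   queue = CircularQueueLinkedList(2)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.dequeue()  # -> "a"
#   queue.dequeue()  # -> "b"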
| 35
| 0
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class a_ :
lowercase = None
@experimental
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return _map_with_joblib(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCamelCase = num_proc if num_proc <= len(__UpperCamelCase ) else len(__UpperCamelCase )
    UpperCamelCase = [] # We organize the splits ourselves (contiguous splits)
for index in range(__UpperCamelCase ):
UpperCamelCase = len(__UpperCamelCase ) // num_proc
UpperCamelCase = len(__UpperCamelCase ) % num_proc
UpperCamelCase = div * index + min(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__UpperCamelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"Error dividing inputs iterable among processes. "
F"Total number of objects {len(__UpperCamelCase )}, "
F"length: {sum(len(i[1] ) for i in split_kwds )}" )
logger.info(
F"Spawning {num_proc} processes for {len(__UpperCamelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}" )
UpperCamelCase ,UpperCamelCase = None, None
if not disable_tqdm:
UpperCamelCase ,UpperCamelCase = (RLock(),), tqdm.set_lock
with Pool(__UpperCamelCase , initargs=__UpperCamelCase , initializer=__UpperCamelCase ) as pool:
UpperCamelCase = pool.map(__UpperCamelCase , __UpperCamelCase )
logger.info(F"Finished {num_proc} processes" )
UpperCamelCase = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"Unpacked {len(__UpperCamelCase )} objects" )
return mapped
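# Worked example of the contiguous split logic above (a sketch; the helper
# name is illustrative): with 10 objects and num_proc=3, div=3 and mod=1,
# so the first process receives one extra object.
def _contiguous_slices(n, num_proc):
    div, mod = n // num_proc, n % num_proc
    starts = [div * i + min(i, mod) for i in range(num_proc)]
    return [(s, s + div + (1 if i < mod else 0)) for i, s in enumerate(starts)]
# _contiguous_slices(10, 3) -> [(0, 4), (4, 7), (7, 10)]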
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
    # progress bar is not yet supported for _map_with_joblib, because tqdm cannot be applied accurately to joblib,
    # and doing so would require monkey-patching joblib internal classes, which are subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__UpperCamelCase ):
return joblib.Parallel()(
joblib.delayed(__UpperCamelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowercase__ ( __UpperCamelCase )-> List[str]:
UpperCamelCase = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
UpperCamelCase = None
| 702
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase )-> int:
UpperCamelCase = [x.strip() for x in open(__UpperCamelCase ).readlines()]
UpperCamelCase = [x.strip() for x in open(__UpperCamelCase ).readlines()][: len(__UpperCamelCase )]
UpperCamelCase = calculate_rouge(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
if save_path is not None:
save_json(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 35
| 0
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowercase__ ( )-> Any:
UpperCamelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
UpperCamelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__UpperCamelCase )
# Let's go
UpperCamelCase = parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase = args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 703
|
'''simple docstring'''
from __future__ import annotations
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> tuple[float, list[float]]:
UpperCamelCase = list(range(len(__UpperCamelCase ) ) )
UpperCamelCase = [v / w for v, w in zip(__UpperCamelCase , __UpperCamelCase )]
    index.sort(key=lambda i : ratio[i] , reverse=__UpperCamelCase )
UpperCamelCase = 0
UpperCamelCase = [0] * len(__UpperCamelCase )
for i in index:
if weight[i] <= capacity:
UpperCamelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCamelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
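# Usage sketch (the function name is hypothetical; it corresponds to the
# fractional-knapsack routine defined above): with values [60, 100, 120],
# weights [10, 20, 30] and capacity 50, items 0 and 1 are taken whole and
# 2/3 of item 2, giving a maximum value of 240.
#
#   >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   (240.0, [1, 1, 0.6666666666666666])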
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a_ ( unittest.TestCase ):
def A__ ( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
UpperCamelCase = """A painting of a squirrel eating a burger"""
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
UpperCamelCase = jax.random.PRNGKey(0 )
UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
UpperCamelCase = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase = images[0, 253:256, 253:256, -1]
UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = """stabilityai/stable-diffusion-2"""
UpperCamelCase ,UpperCamelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder="""scheduler""" )
UpperCamelCase ,UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , revision="""bf16""" , dtype=jnp.bfloataa , )
UpperCamelCase = scheduler_params
UpperCamelCase = """A painting of a squirrel eating a burger"""
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
UpperCamelCase = jax.random.PRNGKey(0 )
UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
UpperCamelCase = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase = images[0, 253:256, 253:256, -1]
UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 704
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( lowerCamelCase ):
lowercase = """deformable_detr"""
lowercase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = backbone_config.get("""model_type""" )
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(_SCREAMING_SNAKE_CASE )
UpperCamelCase = use_timm_backbone
UpperCamelCase = backbone_config
UpperCamelCase = num_channels
UpperCamelCase = num_queries
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = init_xavier_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = auxiliary_loss
UpperCamelCase = position_embedding_type
UpperCamelCase = backbone
UpperCamelCase = use_pretrained_backbone
UpperCamelCase = dilation
# deformable attributes
UpperCamelCase = num_feature_levels
UpperCamelCase = encoder_n_points
UpperCamelCase = decoder_n_points
UpperCamelCase = two_stage
UpperCamelCase = two_stage_num_proposals
UpperCamelCase = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
UpperCamelCase = class_cost
UpperCamelCase = bbox_cost
UpperCamelCase = giou_cost
# Loss coefficients
UpperCamelCase = mask_loss_coefficient
UpperCamelCase = dice_loss_coefficient
UpperCamelCase = bbox_loss_coefficient
UpperCamelCase = giou_loss_coefficient
UpperCamelCase = eos_coefficient
UpperCamelCase = focal_alpha
UpperCamelCase = disable_custom_kernels
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A__ ( self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def A__ ( self ) -> int:
"""simple docstring"""
return self.d_model
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
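# Usage sketch (illustrative; assumes the original pre-obfuscation class
# name DeformableDetrConfig): instantiate with defaults and round-trip
# through the dict serializer defined above.
#
#   config = DeformableDetrConfig()
#   d = config.to_dict()
#   assert d["model_type"] == "deformable_detr"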
| 35
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> int:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = backbone_out_indices
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = backbone_featmap_shape
UpperCamelCase = scope
UpperCamelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = DPTModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def A__ ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def A__ ( self ) -> Any:
"""simple docstring"""
pass
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = False
UpperCamelCase = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
UpperCamelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self ) -> Any:
"""simple docstring"""
pass
@slow
def A__ ( self ) -> Dict:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = """add"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def lowercase__ ( )-> Optional[Any]:
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
UpperCamelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 705
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowercase__ ( __UpperCamelCase )-> Any:
UpperCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( __UpperCamelCase )-> str:
UpperCamelCase ,UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
UpperCamelCase = emb.weight.data
return lin_layer
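# Note: the helper above reuses the (vocab_size, d_model) embedding matrix
# as an output projection that shares its weights (bias disabled), so
# logits are effectively computed as hidden_states @ emb.weight.T.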
def lowercase__ ( __UpperCamelCase )-> str:
UpperCamelCase = torch.load(__UpperCamelCase , map_location="""cpu""" )
UpperCamelCase = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
UpperCamelCase = mam_aaa["""model"""]
remove_ignore_keys_(__UpperCamelCase )
UpperCamelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
UpperCamelCase = MaMaaaConfig(
vocab_size=__UpperCamelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
UpperCamelCase = state_dict["""decoder.embed_tokens.weight"""]
UpperCamelCase = MaMaaaForConditionalGeneration(__UpperCamelCase )
model.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
UpperCamelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
    SCREAMING_SNAKE_CASE__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 35
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=lowerCamelCase ):
lowercase = ["""speech"""]
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""speech"""] )
class a_ ( metaclass=lowerCamelCase ):
lowercase = ["""speech"""]
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""speech"""] )
| 706
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a_ ( lowerCamelCase ):
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """tf_padding""" ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """depth_multiplier""" ) )
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = depth_multiplier
UpperCamelCase = min_depth
UpperCamelCase = tf_padding
UpperCamelCase = int(last_hidden_size * depth_multiplier )
UpperCamelCase = output_stride
UpperCamelCase = hidden_act
UpperCamelCase = classifier_dropout_prob
UpperCamelCase = use_labels
UpperCamelCase = is_training
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = scope
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = MobileNetVaModelTester(self )
UpperCamelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = 26
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def A__ ( self ) -> Dict:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowercase__ ( )-> Optional[Any]:
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Dict:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 35
| 0
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
SCREAMING_SNAKE_CASE__ = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
SCREAMING_SNAKE_CASE__ = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
SCREAMING_SNAKE_CASE__ = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE="binary" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="warn" , ) -> int:
"""simple docstring"""
UpperCamelCase = recall_score(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , pos_label=_SCREAMING_SNAKE_CASE , average=_SCREAMING_SNAKE_CASE , sample_weight=_SCREAMING_SNAKE_CASE , zero_division=_SCREAMING_SNAKE_CASE , )
return {"recall": float(_SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
| 707
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
PIPE_1_MODEL_ID = 'CompVis/stable-diffusion-v1-1'
PIPE_2_MODEL_ID = 'CompVis/stable-diffusion-v1-2'
PIPE_3_MODEL_ID = 'CompVis/stable-diffusion-v1-3'
PIPE_4_MODEL_ID = 'CompVis/stable-diffusion-v1-4'
class a_ ( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ) -> Any:
        """simple docstring"""
        super().__init__()
        self.pipea = StableDiffusionPipeline.from_pretrained(PIPE_1_MODEL_ID )
        self.pipeb = StableDiffusionPipeline.from_pretrained(PIPE_2_MODEL_ID )
        self.pipec = StableDiffusionPipeline.from_pretrained(PIPE_3_MODEL_ID )
        self.piped = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipelinea=self.pipea , pipelineb=self.pipeb , pipelinec=self.pipec , pipelined=self.piped )
@property
    def components( self ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("""_""" )}
    def enable_attention_slicing( self , slice_size = "auto" ) -> Optional[Any]:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> Tuple:
        """simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
    def textaimg_sda_a( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        return self.pipea(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def textaimg_sda_b( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        return self.pipeb(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def textaimg_sda_c( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        """simple docstring"""
        return self.pipec(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def textaimg_sda_d( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> str:
        """simple docstring"""
        return self.piped(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[str]:
        """simple docstring"""
        device = """cuda""" if torch.cuda.is_available() else """cpu"""
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        resa = self.textaimg_sda_a(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        resb = self.textaimg_sda_b(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        resc = self.textaimg_sda_c(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        resd = self.textaimg_sda_d(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resb[0], resc[0], resd[0]] )
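# Hedged usage sketch (comment only; assumes the components of a standard Stable
# Diffusion checkpoint have already been loaded and can be passed through):
# pipe = a_(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
#           scheduler=scheduler, safety_checker=safety_checker,
#           feature_extractor=feature_extractor)
# result = pipe('a photo of an astronaut riding a horse')
# result[0]  # four images, one per Stable Diffusion checkpoint v1.1-v1.4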
| 35
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 709
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1
def pressure_of_gas_system( moles , kelvin , volume )-> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles , kelvin , pressure )-> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
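# Hedged spot check (the descriptive function names above are reconstructions):
# one mole of an ideal gas at 300 K in a volume of 1 m^3 exerts
# P = nRT / V = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa,
# so pressure_of_gas_system(1, 300, 1) should return roughly 2494.34.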
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class a_ ( DeformableDetrImageProcessor ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 710
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
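# Hedged sketch of the LocalSGD idea itself (illustrative only, not the
# `accelerate` API): each worker runs plain SGD locally, and parameters are
# averaged across workers once every `local_sgd_steps` optimizer steps instead
# of all-reducing gradients on every step:
#
#     if step % local_sgd_steps == 0 and torch.distributed.is_initialized():
#         world_size = torch.distributed.get_world_size()
#         for param in model.parameters():
#             torch.distributed.all_reduce(param.data)  # sum across workers...
#             param.data /= world_size                  # ...then average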
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 )-> Dict:
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
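# Hedged illustration of the collate_fn above: with mixed precision (pad to a
# multiple of 8), a batch whose longest sequence has 61 tokens is padded to 64
# so tensor shapes stay Tensor Core friendly; on TPU every batch is padded to
# the fixed max_length of 128 to avoid recompilation.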
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args )-> List[Any]:
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main( )-> List[Any]:
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose """
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 """
        """and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be run before gradients are accumulated.""" , )
    parser.add_argument(
        """--local_sgd_steps""" , type=int , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 35
| 0
|
'''simple docstring'''
class SubArray:
    def __init__( self , arr ) -> List[Any]:
        """simple docstring"""
        self.array = arr.split(""",""" )
    def solve_sub_array( self ) -> Optional[int]:
        """simple docstring"""
        # Kadane-style scan: sum_value[i] is the best subarray sum ending at i,
        # rear[i] is the best sum seen anywhere up to i.
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the result is:', re))
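# Worked example of the scan above: for the input string "1,-2,3,4" the best
# contiguous subarray is [3, 4], so SubArray("1,-2,3,4").solve_sub_array()
# returns 7 (sum_value becomes [1, -1, 3, 7] and rear becomes [1, 1, 3, 7]).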
| 711
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = patch_norm
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = is_training
UpperCamelCase = scope
UpperCamelCase = use_labels
UpperCamelCase = type_sequence_label_size
UpperCamelCase = encoder_stride
UpperCamelCase = out_features
UpperCamelCase = out_indices
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = ["""stem"""]
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> int:
"""simple docstring"""
return
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip("""Swin does not use inputs_embeds""" )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has"
F" `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
@require_torch
class a_ ( unittest.TestCase , lowerCamelCase ):
lowercase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase = MaskFormerSwinConfig
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCamelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 35
| 0
|
'''simple docstring'''
def lowercase__ ( a , b )-> str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
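# Hedged check of the function above: bin(37) = '100101' and bin(100) = '1100100',
# and 37 & 100 == 36, so lowercase__(37, 100) returns '0b0100100' (decimal 36).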
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ )-> str:
    return EnvironmentCommand()
def download_command_factory( args )-> str:
    return EnvironmentCommand(args.accelerate_config_file )
class a_ ( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ) -> Optional[Any]:
        """simple docstring"""
        download_parser = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            """--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        """simple docstring"""
        self._accelerate_config_file = accelerate_config_file
    def run( self ) -> List[Any]:
        """simple docstring"""
        safetensors_version = """not installed"""
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""" ) is not None:
            import safetensors
            safetensors_version = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = """not installed"""
        accelerate_config = accelerate_config_str = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                """\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F"\t{accelerate_config}"
            )
        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = """not installed"""
        tf_cuda_available = """NA"""
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("""GPU""" ) )
        flax_version = """not installed"""
        jax_version = """not installed"""
        jaxlib_version = """not installed"""
        jax_backend = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": F"{safetensors_version}",
            """Accelerate version""": F"{accelerate_version}",
            """Accelerate config""": F"{accelerate_config_str}",
            """PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
            """Tensorflow version (GPU?)""": F"{tf_version} ({tf_cuda_available})",
            """Flax version (CPU?/GPU?/TPU?)""": F"{flax_version} ({jax_backend})",
            """Jax version""": F"{jax_version}",
            """JaxLib version""": F"{jaxlib_version}",
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }
        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ) -> Dict:
        """simple docstring"""
        return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 35
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class a_ ( unittest.TestCase ):
lowercase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = AudioClassificationPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
# test with a raw waveform
UpperCamelCase = np.zeros((34000,) )
UpperCamelCase = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = examples
UpperCamelCase = audio_classifier(_SCREAMING_SNAKE_CASE )
# by default a model is initialized with num_labels=2
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
UpperCamelCase = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=1 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
self.run_torchaudio(_SCREAMING_SNAKE_CASE )
@require_torchaudio
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
import datasets
# test with a local file
UpperCamelCase = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
UpperCamelCase = dataset[0]["""audio"""]["""array"""]
UpperCamelCase = audio_classifier(_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = """anton-l/wav2vec2-random-tiny-classifier"""
UpperCamelCase = pipeline("""audio-classification""" , model=_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.ones((8000,) )
UpperCamelCase = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=4 )
UpperCamelCase = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
UpperCamelCase = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCamelCase = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
UpperCamelCase = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=4 )
self.assertIn(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def A__ ( self ) -> int:
"""simple docstring"""
import datasets
UpperCamelCase = """superb/wav2vec2-base-superb-ks"""
UpperCamelCase = pipeline("""audio-classification""" , model=_SCREAMING_SNAKE_CASE )
UpperCamelCase = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
UpperCamelCase = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
UpperCamelCase = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=4 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
| 713
|
'''simple docstring'''
from math import factorial
def solution( n = 20 )-> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
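# Worked example: solution(20) computes the central binomial coefficient
# C(40, 20) = 40! / (20! * 20!) = 137846528820, the number of lattice paths
# through a 20x20 grid (Project Euler 15 semantics assumed).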
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35
| 0
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class a_ ( BertTokenizerFast ):
    slow_tokenizer_class = CustomTokenizer
    pass
| 714
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors( n )-> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( n = 10000 )-> int:
    total = sum(
        i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
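# Hedged check: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so
# 220 and 284 form an amicable pair; under Project Euler 21 semantics,
# solution(10000) should evaluate to 31626, the sum of all amicable numbers below 10000.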
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 35
| 0
|
'''simple docstring'''
import numpy as np
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
"""simple docstring"""
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
"""simple docstring"""
if red is not None:
UpperCamelCase = red
if green is not None:
UpperCamelCase = green
if blue is not None:
UpperCamelCase = blue
if red_edge is not None:
UpperCamelCase = red_edge
if nir is not None:
UpperCamelCase = nir
return True
def A__ ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[Any]:
"""simple docstring"""
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
UpperCamelCase = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def A__ ( self ) -> Any:
"""simple docstring"""
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def A__ ( self ) -> Dict:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def A__ ( self ) -> List[str]:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def A__ ( self ) -> Any:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def A__ ( self ) -> List[str]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def A__ ( self ) -> Dict:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi( self , a=0.08 , b=1.22 , x=0.03 ):
        """Adjusted Transformed Soil-Adjusted Vegetation Index (ATSAVI)."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        """Blue-Wide Dynamic Range Vegetation Index (BWDRVI)."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        """Chlorophyll Index - Green (CIgreen)."""
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        """Chlorophyll Index - Red Edge (CIrededge)."""
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        """Coloration Index (CI)."""
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        """Corrected Transformed Vegetation Index (CTVI)."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        """Green Difference Vegetation Index (GDVI)."""
        return self.nir - self.green
    def evi( self ):
        """Enhanced Vegetation Index (EVI)."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        """Global Environment Monitoring Index (GEMI)."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self , y=0.16 ):
        """Green Optimized Soil-Adjusted Vegetation Index (GOSAVI)."""
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        """Green Soil-Adjusted Vegetation Index (GSAVI)."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        """Hue."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        """Ideal Vegetation Index (IVI); `a` and `b` are the soil-line slope and intercept."""
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        """Infrared Percentage Vegetation Index (IPVI)."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        """Intensity (I)."""
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        """Ratio Vegetation Index (RVI)."""
        return self.nir / self.red
    def mrvi( self ):
        """Modified Ratio Vegetation Index (MRVI)."""
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        """Modified Soil-Adjusted Vegetation Index (MSAVI)."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        """Norm G: green share of total reflectance."""
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        """Norm NIR: near-infrared share of total reflectance."""
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        """Norm R: red share of total reflectance."""
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        """Normalized Green-Red Difference Index (NGRDI)."""
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        """Redness Index (RI)."""
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        """Saturation (S)."""
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        """Shape index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        """DVI (as implemented here: the simple NIR/Red ratio)."""
        return self.nir / self.red
    def tvi( self ):
        """Transformed Vegetation Index (TVI)."""
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        """Normalized Difference Red Edge (NDRE)."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
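# Worked example (a minimal sketch, independent of the class above): NDVI is
# (nir - red) / (nir + red), so a pixel with nir=0.5 and red=0.1 scores 0.4 / 0.6.
if __name__ == "__main__":
    _nir, _red = 0.5, 0.1
    assert abs((_nir - _red) / (_nir + _red) - 2 / 3) < 1e-9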
| 715
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
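# Behavior note (an explanation, not part of the original file): _LazyModule replaces
# this module in sys.modules, so `SwiftFormerConfig` and friends are only imported on
# first attribute access instead of eagerly at package import time.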
| 35
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE__ = {
'camembert-base': 5_1_2,
}
SCREAMING_SNAKE_CASE__ = '▁'
class a_ ( lowerCamelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
lowercase = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        """Build the fast CamemBERT tokenizer, optionally backed by a slow sentencepiece vocab file."""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """Add special tokens around one sequence or a sequence pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """CamemBERT does not use token types, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Copy the sentencepiece vocab file into the save directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
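# Encoding layout note (an illustration, not part of the original file): a single
# sequence becomes `<s> A </s>`, a pair becomes `<s> A </s></s> B </s>`, and
# create_token_type_ids_from_sequences returns all zeros because CamemBERT, like
# RoBERTa, does not use token type embeddings.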
| 716
|
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = ""
    # Left-pad with zeros so the string splits evenly into 3-bit groups.
    while len(bin_string ) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
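    # Usage sketch (`bin_to_octal` is the name given to the converter during the
    # cleanup above): pad to a multiple of 3 bits, then map each 3-bit group.
    assert bin_to_octal("1111") == "17"  # 0b1111 == 0o17
    assert bin_to_octal("101010") == "52"  # 0b101010 == 0o52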
| 35
| 0
|
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    # Square-and-multiply modular exponentiation.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Iterated exponentiation (tetration) of `base`, keeping only the last `digits` digits.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
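    # Sanity sketch: `_modexpt` is square-and-multiply modular exponentiation, so it
    # must agree with Python's built-in three-argument pow for small cases.
    assert _modexpt(3, 19, 10**8) == pow(3, 19, 10**8)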
| 717
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F"- {prop}: {val}" for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F"\t{accelerate_config}"
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
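# Usage note (an illustration, not part of the original script): `accelerate env`
# prints the table assembled above for bug reports, and
# `accelerate env --config_file path/to/config.yaml` reports a specific Accelerate
# config file instead of the default one.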
| 35
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            """starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
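    # Worked example (a sketch of the loop above): numbers 1..7 produce
    # "1 2 Fizz 4 Buzz Fizz 7 ".
    assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "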
| 35
| 0
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCamelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
else:
UpperCamelCase = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = ProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
UpperCamelCase = ["""key_proj""", """value_proj""", """query_proj"""]
UpperCamelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
UpperCamelCase = key.split(""".""" )
if attributes[0] == "lm_head":
UpperCamelCase = prophet
UpperCamelCase = prophet_old
else:
UpperCamelCase = prophet.prophetnet
UpperCamelCase = prophet_old.model
UpperCamelCase = False
for attribute in attributes:
if attribute in mapping:
UpperCamelCase = mapping[attribute]
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0:
UpperCamelCase = attribute
elif hasattr(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCamelCase = old_model.weight
logger.info(F"{attribute} is initialized." )
UpperCamelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCamelCase = old_model.bias
logger.info(F"{attribute} is initialized" )
UpperCamelCase = True
break
elif attribute in special_keys and hasattr(__UpperCamelCase , """in_proj_weight""" ):
UpperCamelCase = old_model.in_proj_weight.shape[0] // 3
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCamelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCamelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCamelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCamelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCamelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCamelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCamelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCamelCase = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCamelCase = True
break
if attribute.isdigit():
UpperCamelCase = model[int(__UpperCamelCase )]
UpperCamelCase = old_model[int(__UpperCamelCase )]
else:
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase )
if old_attribute == "":
UpperCamelCase = old_model
else:
if not hasattr(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(F"{old_model} does not have {old_attribute}" )
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase )
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
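# Shape sketch (an illustration of the qkv split above, with made-up sizes): a fused
# in_proj_weight of shape (3 * embed_dim, embed_dim) slices into equal query, key and
# value blocks.
import torch as _torch
_fused = _torch.zeros(12, 4)  # embed_dim = 4
assert _fused[:4].shape == _fused[4:8].shape == _fused[8:].shape == (4, 4)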
| 719
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint(checkpoint , config ):
    new_checkpoint = {}
UpperCamelCase = checkpoint["""time_embed.0.weight"""]
UpperCamelCase = checkpoint["""time_embed.0.bias"""]
UpperCamelCase = checkpoint["""time_embed.2.weight"""]
UpperCamelCase = checkpoint["""time_embed.2.bias"""]
UpperCamelCase = checkpoint["""input_blocks.0.0.weight"""]
UpperCamelCase = checkpoint["""input_blocks.0.0.bias"""]
UpperCamelCase = checkpoint["""out.0.weight"""]
UpperCamelCase = checkpoint["""out.0.bias"""]
UpperCamelCase = checkpoint["""out.2.weight"""]
UpperCamelCase = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
UpperCamelCase = (i - 1) // (config["""num_res_blocks"""] + 1)
UpperCamelCase = (i - 1) % (config["""num_res_blocks"""] + 1)
UpperCamelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
UpperCamelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
UpperCamelCase = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
UpperCamelCase = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
UpperCamelCase = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
UpperCamelCase = middle_blocks[0]
UpperCamelCase = middle_blocks[1]
UpperCamelCase = middle_blocks[2]
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
UpperCamelCase = i // (config["""num_res_blocks"""] + 1)
UpperCamelCase = i % (config["""num_res_blocks"""] + 1)
UpperCamelCase = [shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
UpperCamelCase = {}
for layer in output_block_layers:
UpperCamelCase ,UpperCamelCase = layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
UpperCamelCase = [layer_name]
if len(__UpperCamelCase ) > 1:
UpperCamelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
UpperCamelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCamelCase = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
UpperCamelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
UpperCamelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
UpperCamelCase = []
if len(__UpperCamelCase ):
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
UpperCamelCase = renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCamelCase = """.""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
UpperCamelCase = """.""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
UpperCamelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read())
SCREAMING_SNAKE_CASE__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE__ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 35
| 0
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree(
    sequence: list[int | str] ,
    current_sequence: list[int | str] ,
    index: int ,
    index_used: list[int] ,
) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
SCREAMING_SNAKE_CASE__ = [3, 1, 2, 4]
generate_all_permutations(sequence)
SCREAMING_SNAKE_CASE__ = ['A', 'B', 'C']
generate_all_permutations(sequence_a)
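# Cross-check (an illustration, not part of the original file): the state-space tree
# prints every ordering, so an n-element sequence yields n! permutations.
from itertools import permutations as _itertools_permutations
assert len(list(_itertools_permutations(sequence))) == 24  # 4! orderings of [3, 1, 2, 4]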
| 720
|
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float , current: float , resistance: float ) -> dict[str, float]:
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""" )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
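    # Worked example (a sketch): with current == 0, the function solves V = I * R
    # for the missing current, so 10 V across 5 ohm gives 2 A.
    assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}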
| 35
| 0
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__ ( )-> str:
UpperCamelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
UpperCamelCase = spark.range(100 ).repartition(1 )
UpperCamelCase = Spark(__UpperCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__ ( )-> Any:
UpperCamelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
UpperCamelCase = spark.range(10 ).repartition(2 )
UpperCamelCase = [1, 0]
UpperCamelCase = _generate_iterable_examples(__UpperCamelCase , __UpperCamelCase ) # Reverse the partitions.
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase , __UpperCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCamelCase ,UpperCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__ ( )-> Tuple:
UpperCamelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
UpperCamelCase = spark.range(10 ).repartition(1 )
UpperCamelCase = SparkExamplesIterable(__UpperCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__ ( )-> Union[str, Any]:
UpperCamelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
UpperCamelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
UpperCamelCase = lambda __UpperCamelCase : x.reverse()
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase , [2, 1, 0] )
UpperCamelCase = SparkExamplesIterable(__UpperCamelCase ).shuffle_data_sources(__UpperCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
UpperCamelCase ,UpperCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__ ( )-> List[Any]:
UpperCamelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
UpperCamelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCamelCase = SparkExamplesIterable(__UpperCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
UpperCamelCase ,UpperCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase = SparkExamplesIterable(__UpperCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
UpperCamelCase ,UpperCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__ ( )-> Tuple:
UpperCamelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
UpperCamelCase = spark.range(100 ).repartition(1 )
UpperCamelCase = Spark(__UpperCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 721
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 35
| 0
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 36
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
    def construct_mapping( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme( readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path: Path ) -> "DatasetMetadata":
        """Load dataset metadata from the YAML block of a README file."""
        with open(path , encoding="""utf-8""" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path: Path ) -> None:
        """Write (or rewrite) the YAML metadata block of a README file."""
        if path.exists():
            with open(path , encoding="""utf-8""" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme( self , readme_content: Optional[str] = None ) -> str:
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
    @classmethod
    def from_yaml_string( cls , string: str ) -> "DatasetMetadata":
        """Parse a YAML string into DatasetMetadata, rejecting duplicate keys."""
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
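# Round-trip sketch (a minimal illustration of the helper above): the YAML block is
# everything between the leading pair of `---` markers.
_yaml_block, _body = _split_yaml_from_readme("---\nfoo: bar\n---\nhello")
assert _yaml_block == "foo: bar" and _body == "hello"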
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Dict = MgpstrTokenizer
UpperCamelCase : Dict = False
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[int] = False
    def setUp( self ) -> None:
        super().setUp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
    def get_tokenizer( self , **kwargs ) -> MgpstrTokenizer:
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def lowerCamelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_: int ="""[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.get_input_output_texts(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.tokenize(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer.convert_tokens_to_ids(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertNotEqual(len(lowerCAmelCase ) , 0 )
SCREAMING_SNAKE_CASE_: Any =tokenizer.decode(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
| 36
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data ):
    return (data["data"], data["target"])
def xgboost( features , target ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main():
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ):
        """A tiny UNet for fast unit tests."""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.dummy_uncond_unet
SCREAMING_SNAKE_CASE_: Optional[Any] =ScoreSdeVeScheduler()
SCREAMING_SNAKE_CASE_: Any =ScoreSdeVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
sde_ve.to(lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=lowerCAmelCase ).images
SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=lowerCAmelCase , return_dict=lowerCAmelCase )[
0
]
SCREAMING_SNAKE_CASE_: Dict =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_: str =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""google/ncsnpp-church-256"""
SCREAMING_SNAKE_CASE_: List[Any] =UNetaDModel.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =ScoreSdeVeScheduler.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ScoreSdeVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
sde_ve.to(lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=lowerCAmelCase ).images
SCREAMING_SNAKE_CASE_: List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE_: List[Any] =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 36
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks( protein: Dict ) -> Dict:
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def make_atomaa_masks_np( batch: Dict ) -> Dict:
    batch = tree_map(lambda n: torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atomaa_masks(batch ) )
    return out
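# Shape sketch (an illustration with hypothetical sizes): the lookup tables built above
# are indexed by per-residue aatype, so fancy indexing turns a (num_res,) index tensor
# into per-residue (num_res, 14) / (num_res, 37) gather maps.
assert torch.zeros(21, 14)[torch.tensor([0, 5, 20])].shape == (3, 14)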
| 36
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
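# Usage sketch: ConvNextV2Config() yields the defaults above, with
# config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"].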
| 36
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
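# For instance, output_types(["hi", torch.ones(3000)]) evaluates to ["text", "audio"];
# an object matching none of the branches raises a ValueError.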
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36
| 1
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """
    Project Euler 34: sum of all numbers equal to the sum of the factorials of
    their digits. The bound works because even 8 nines give only 8 * 9! = 2903040,
    a 7-digit number, so no candidate can exceed 7 * 9! + 1.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
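# For example: median_of_two_arrays([1, 3], [2]) == 2, while
# median_of_two_arrays([1, 2], [3, 4]) == 2.5 (mean of the two middle values).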
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 1
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both sub-configurations must be provided; they are materialized into
        # full config objects via AutoConfig below.
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
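# Usage sketch (config names illustrative):
# VisionEncoderDecoderConfig.from_encoder_decoder_configs(vit_config, gpt2_config)
# returns a composite config whose decoder has is_decoder and add_cross_attention set.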
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 36
| 1
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
# Maps args.num_image_embeds -> the (rows, cols) grid used for adaptive pooling.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
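# Sketch: with args.num_image_embeds == 3 the pooled grid is (3, 1), so a
# Bx3x224x224 batch becomes a Bx3x2048 sequence of image "tokens".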
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
| 36
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Arc length is the (angle / 360) fraction of the full circumference 2*pi*r."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 36
| 1
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature; all anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the list with the same signature as my_word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 36
| 1
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self : str , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any]="<s>" , lowerCAmelCase : List[str]="</s>" , lowerCAmelCase : Dict="<pad>" , lowerCAmelCase : Dict="<unk>" , lowerCAmelCase : Tuple=False , lowerCAmelCase : str=False , lowerCAmelCase : int=None , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , do_upper_case=lowerCAmelCase , do_lower_case=lowerCAmelCase , tgt_lang=lowerCAmelCase , lang_codes=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[Any] =do_upper_case
SCREAMING_SNAKE_CASE_: str =do_lower_case
SCREAMING_SNAKE_CASE_: str =load_json(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ={v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] =spm_file
SCREAMING_SNAKE_CASE_: Optional[int] =load_spm(lowerCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
SCREAMING_SNAKE_CASE_: Optional[Any] =lang_codes
SCREAMING_SNAKE_CASE_: str =LANGUAGES[lang_codes]
SCREAMING_SNAKE_CASE_: List[Any] =[f'''<lang:{lang}>''' for lang in self.langs]
SCREAMING_SNAKE_CASE_: Any ={lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
SCREAMING_SNAKE_CASE_: Tuple =self.lang_tokens
SCREAMING_SNAKE_CASE_: Any =tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
SCREAMING_SNAKE_CASE_: List[Any] ={}
@property
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return len(self.encoder )
@property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE_: Any =[lang_code_id]
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase , self.encoder[self.unk_token] )
def lowerCamelCase__ ( self : str , lowerCAmelCase : int ) -> str:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase , self.unk_token )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =[]
SCREAMING_SNAKE_CASE_: int =""""""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
SCREAMING_SNAKE_CASE_: List[Any] =self.sp_model.decode(lowerCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
SCREAMING_SNAKE_CASE_: Optional[int] =[]
else:
current_sub_tokens.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.sp_model.decode(lowerCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : str=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =[1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.__dict__.copy()
SCREAMING_SNAKE_CASE_: Optional[Any] =None
return state
def __setstate__( self : Tuple , lowerCAmelCase : Dict ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE_: Any ={}
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =Path(lowerCAmelCase )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
SCREAMING_SNAKE_CASE_: Any =save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
SCREAMING_SNAKE_CASE_: Tuple =save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(lowerCAmelCase , """wb""" ) as fi:
SCREAMING_SNAKE_CASE_: Optional[int] =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (str(lowerCAmelCase ), str(lowerCAmelCase ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 36
|
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    """Project Euler 10: sum of all primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 marks "still assumed prime"
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
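# The dummy "-f" flag is parsed (and discarded) so the file can also be launched
# by runners, such as notebook kernels, that inject it automatically.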
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 36
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 36
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
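# Usage sketch (feature names illustrative): given features containing a ClassLabel
# column named "labels", ImageClassification().align_with_features(features) returns
# a copy whose label_schema carries that column's class names.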
| 36
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """AND is 1 only when no input is 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 1
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the embedded profile JSON out of an inline <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __magic_name__ ( lowercase = "github" ):
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE_: Tuple =InstagramUser(lowercase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 36
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
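# Example invocation (script and file names hypothetical):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan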
| 36
| 1
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 36
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
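# Hedged illustration: a tiny, self-contained sketch of the key-renaming idea
# used above. The helper `_demo_rename` is hypothetical and applies only two
# of the substitutions performed by the full function.
def _demo_rename(name: str) -> str:
    """
    >>> _demo_rename("blocks.0.attn.proj.weight")
    'vit.encoder.layer.0.attention.output.dense.weight'
    """
    for old, new in [("attn.proj", "attention.output.dense"), ("blocks", "vit.encoder.layer")]:
        name = name.replace(old, new)
    return name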
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
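# Hedged illustration: a minimal sketch of the fused-qkv split performed in the
# loop above; the helper name `_demo_split_qkv` is hypothetical. It works for
# both 2-D weights and 1-D biases.
def _demo_split_qkv(qkv: torch.Tensor, dim: int):
    """Split a (3 * dim, ...) fused projection into query/key/value chunks."""
    return qkv[:dim], qkv[dim : dim * 2], qkv[-dim:]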
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =1
SCREAMING_SNAKE_CASE_: Dict =3
SCREAMING_SNAKE_CASE_: List[Any] =(32, 32)
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase )
return image
@property
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowerCAmelCase )
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
def extract(*lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Any ):
class a :
def __init__( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =torch.ones([0] )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase )
return self
return Out()
return extract
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_cond_unet
SCREAMING_SNAKE_CASE_: Tuple =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: int =self.dummy_vae
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: int =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline (a DDIM scheduler is used here, so there are no PRK steps to skip)
SCREAMING_SNAKE_CASE_: Any =StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[int] =sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE_: Optional[int] =output.images
SCREAMING_SNAKE_CASE_: Tuple =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_: Union[str, Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Tuple =np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: int =self.dummy_cond_unet
SCREAMING_SNAKE_CASE_: str =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self.dummy_vae
SCREAMING_SNAKE_CASE_: int =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Optional[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_: int =StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE_: Dict =output.images
SCREAMING_SNAKE_CASE_: Optional[int] =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_: Tuple =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: str =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Dict =np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowerCAmelCase )
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert isinstance(pipe.scheduler , lowerCAmelCase )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_: Dict =pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.dummy_cond_unet
SCREAMING_SNAKE_CASE_: List[str] =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.dummy_vae
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Tuple =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
SCREAMING_SNAKE_CASE_: str =unet.half()
SCREAMING_SNAKE_CASE_: Dict =vae.half()
SCREAMING_SNAKE_CASE_: int =bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_: Optional[int] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =(
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
SCREAMING_SNAKE_CASE_: str =4_003_660_346
SCREAMING_SNAKE_CASE_: Dict =7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE_: Tuple =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_: Tuple =output.images
SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Tuple =[0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with strong safety guidance (sld_guidance_scale = 2000)
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_: Optional[Any] =output.images
SCREAMING_SNAKE_CASE_: Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =[0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_: Any =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str ="""padme amidala taking a bath artwork, safe for work, no nudity"""
SCREAMING_SNAKE_CASE_: int =2_734_971_755
SCREAMING_SNAKE_CASE_: Union[str, Any] =7
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_: List[str] =output.images
SCREAMING_SNAKE_CASE_: Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: List[str] =[0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_: Optional[int] =output.images
SCREAMING_SNAKE_CASE_: List[str] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Dict =[0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
SCREAMING_SNAKE_CASE_: Any =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =(
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
SCREAMING_SNAKE_CASE_: Optional[Any] =1_044_355_234
SCREAMING_SNAKE_CASE_: Tuple =12
SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_: List[Any] =output.images
SCREAMING_SNAKE_CASE_: str =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Dict =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =output.images
SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: List[str] =np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
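# Hedged note: the mapping above (plus the lists filled in below) tells
# `_LazyModule` what names this package exposes; heavy submodules are only
# imported on first attribute access. A minimal sketch of the access path
# (module path hypothetical):
#
#     import transformers.models.xlm as xlm  # cheap: no torch import yet
#     xlm.XLMModel                            # first access triggers the import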
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Tuple ={
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE_: int ={
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
SCREAMING_SNAKE_CASE_: Union[str, Any] =f'''{src_lang}-{tgt_lang}'''
SCREAMING_SNAKE_CASE_: int =f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensembling, so the best-performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if you re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(lowercase , exist_ok=lowercase )
SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , """README.md""" )
print(f'''Generating {path}''' )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase )
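# Hedged illustration: the card body above is a single f-string keyed on the
# language pair; a minimal sketch of the same templating idea (the helper
# name `_demo_card_header` is hypothetical).
def _demo_card_header(src_lang: str, tgt_lang: str) -> str:
    """Return the YAML language block that opens each generated card."""
    return f"---\nlanguage:\n- {src_lang}\n- {tgt_lang}\n---"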
# make sure we are under the root of the project
_UpperCAmelCase = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = model_name.split("""-""")
_UpperCAmelCase = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
while is_sorted is False: # keep looping until a full pass makes no swaps
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
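# Hedged illustration: a compact, self-contained restatement of one odd-even
# transposition pass from the function above; `_demo_pass` is hypothetical and
# not part of the original script.
def _demo_pass(values: list, start: int) -> None:
    """Run one compare-and-swap pass over indices start, start + 2, ...

    >>> data = [3, 1, 4, 1, 5]
    >>> _demo_pass(data, 0)  # even pass
    >>> data
    [1, 3, 1, 4, 5]
    """
    for i in range(start, len(values) - 1, 2):
        if values[i] > values[i + 1]:
            values[i], values[i + 1] = values[i + 1], values[i]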
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class a ( pl.LightningModule ):
def __init__( self : List[Any] , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Any =model
SCREAMING_SNAKE_CASE_: Optional[Any] =2
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Linear(self.model.config.hidden_size , self.num_labels )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def __magic_name__ ( lowercase , lowercase , lowercase ):
# load longformer model from model identifier
SCREAMING_SNAKE_CASE_: Union[str, Any] =LongformerModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =LightningModel(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.load(lowercase , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
SCREAMING_SNAKE_CASE_: int =LongformerForQuestionAnswering.from_pretrained(lowercase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowercase )
print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
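# Hedged illustration: the transfer above works because the Lightning wrapper
# and the HF model expose matching parameter names module-by-module; a minimal
# sketch of the same idea (the helper name `_demo_transfer` is hypothetical).
def _demo_transfer(src: nn.Module, dst: nn.Module) -> None:
    """Copy parameters by loading one module's state_dict into the other."""
    dst.load_state_dict(src.state_dict())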
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 10_000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
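# Hedged illustration: the reverse-and-add step used above, shown standalone
# for clarity; the helper name `_demo_sum_reverse` is hypothetical. Note that
# the `else` on the while loop above runs only when 50 iterations pass without
# producing a palindrome, which is what marks a number as Lychrel.
def _demo_sum_reverse(num: int) -> int:
    """
    >>> _demo_sum_reverse(47)   # 47 + 74 is already a palindrome
    121
    >>> _demo_sum_reverse(349)  # 349 + 943 needs further iterations
    1292
    """
    return num + int(str(num)[::-1])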
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCAmelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def __magic_name__ ( lowercase ):
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_: Optional[int] =k.replace(lowercase , lowercase )
return k
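# Hedged illustration: how the PATTERNS table above rewrites one raw TF key;
# the helper `_demo_rename_key` is hypothetical and applies only four of the
# patterns.
def _demo_rename_key(k: str) -> str:
    """
    >>> _demo_rename_key("memory_attention/output_proj/kernel")
    'encoder_attn.out_proj.weight'
    """
    for old, new in [
        ("memory_attention", "encoder_attn"),
        ("/", "."),
        ("output_proj", "out_proj"),
        ("kernel", "weight"),
    ]:
        k = k.replace(old, new)
    return k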
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =DEFAULTS.copy()
cfg_kwargs.update(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =PegasusConfig(**lowercase )
SCREAMING_SNAKE_CASE_: Any =PegasusForConditionalGeneration(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch_model.model.state_dict()
SCREAMING_SNAKE_CASE_: int ={}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE_: Any =rename_state_dict_key(lowercase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE_: Optional[int] =v.T
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(lowercase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE_: str =torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE_: Optional[int] =mapping["""shared.weight"""]
SCREAMING_SNAKE_CASE_: Dict =mapping["""shared.weight"""]
SCREAMING_SNAKE_CASE_: Union[str, Any] ={k: torch.zeros_like(lowercase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =torch_model.model.load_state_dict(lowercase , strict=lowercase )
SCREAMING_SNAKE_CASE_: List[str] =[
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
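# Hedged note: TF checkpoints store dense kernels as (in_dim, out_dim) while
# torch.nn.Linear expects (out_dim, in_dim), which is why "dense"/"proj"
# weights are transposed above. A minimal sketch (helper name hypothetical):
def _demo_tf_kernel_to_torch(kernel) -> torch.Tensor:
    """Transpose a TF (in_dim, out_dim) kernel into torch's Linear layout."""
    return torch.tensor(kernel).T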
def __magic_name__ ( lowercase="./ckpt/aeslc/model.ckpt-32000" ):
SCREAMING_SNAKE_CASE_: Optional[int] =tf.train.list_variables(lowercase )
SCREAMING_SNAKE_CASE_: Any ={}
SCREAMING_SNAKE_CASE_: Optional[Any] =["""Adafactor""", """global_step"""]
for name, shape in tqdm(lowercase , desc="""converting tf checkpoint to dict""" ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_: List[str] =tf.train.load_variable(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =array
return tf_weights
def __magic_name__ ( lowercase , lowercase ):
# save tokenizer first
SCREAMING_SNAKE_CASE_: List[Any] =Path(lowercase ).parent.name
SCREAMING_SNAKE_CASE_: Optional[int] =task_specific_params[f'''summarization_{dataset}''']["""max_position_embeddings"""]
SCREAMING_SNAKE_CASE_: List[str] =PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=lowercase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowercase )
# convert model
SCREAMING_SNAKE_CASE_: Optional[Any] =get_tf_weights_as_numpy(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
SCREAMING_SNAKE_CASE_: Any =task_specific_params
SCREAMING_SNAKE_CASE_: List[str] =convert_pegasus(lowercase , lowercase )
torch_model.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: int =torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(lowercase , Path(lowercase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase = parser.parse_args()
if args.save_dir is None:
_UpperCAmelCase = Path(args.tf_ckpt_path).parent.name
_UpperCAmelCase = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import sys
_UpperCAmelCase = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_UpperCAmelCase = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoConfig.from_pretrained(*lowercase , **lowercase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoTokenizer.from_pretrained(*lowercase , **lowercase )
@add_start_docstrings(AutoModel.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoModel.from_pretrained(*lowercase , **lowercase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoModelForCausalLM.from_pretrained(*lowercase , **lowercase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoModelForMaskedLM.from_pretrained(*lowercase , **lowercase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoModelForSequenceClassification.from_pretrained(*lowercase , **lowercase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __magic_name__ ( *lowercase , **lowercase ):
return AutoModelForQuestionAnswering.from_pretrained(*lowercase , **lowercase )
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
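# Hedged illustration: the four rotations above fire when the subtree heights
# at a node differ by 2; a minimal sketch of that balance factor (the helper
# name `_demo_balance` is hypothetical and not part of the original module).
def _demo_balance(node: MyNode | None) -> int:
    """Positive when left-heavy, negative when right-heavy, 0 when balanced."""
    if node is None:
        return 0
    return get_height(node.get_left()) - get_height(node.get_right())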
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an imbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive view of the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
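# Hedged illustration: Atbash maps the i-th letter of the alphabet to the
# (25 - i)-th letter, which is what both implementations above compute; the
# helper name `_demo_atbash_pair` is hypothetical.
def _demo_atbash_pair(c: str) -> str:
    """
    >>> _demo_atbash_pair("A")
    'Z'
    >>> _demo_atbash_pair("b")
    'y'
    """
    base = ord("A") if c.isupper() else ord("a")
    return chr(base + 25 - (ord(c) - base))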
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =384
if "tiny" in model_name:
SCREAMING_SNAKE_CASE_: Any =[3, 3, 9, 3]
SCREAMING_SNAKE_CASE_: List[Any] =[96, 192, 384, 768]
if "small" in model_name:
SCREAMING_SNAKE_CASE_: str =[3, 3, 27, 3]
SCREAMING_SNAKE_CASE_: Any =[96, 192, 384, 768]
if "base" in model_name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =[3, 3, 27, 3]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[128, 256, 512, 1024]
SCREAMING_SNAKE_CASE_: List[Any] =512
if "large" in model_name:
SCREAMING_SNAKE_CASE_: List[str] =[3, 3, 27, 3]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[192, 384, 768, 1536]
SCREAMING_SNAKE_CASE_: Tuple =768
if "xlarge" in model_name:
SCREAMING_SNAKE_CASE_: Optional[Any] =[3, 3, 27, 3]
SCREAMING_SNAKE_CASE_: int =[256, 512, 1024, 2048]
SCREAMING_SNAKE_CASE_: int =1024
# set label information
SCREAMING_SNAKE_CASE_: Any =150
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""huggingface/label-files"""
SCREAMING_SNAKE_CASE_: Dict ="""ade20k-id2label.json"""
SCREAMING_SNAKE_CASE_: List[Any] =json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE_: List[str] ={int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Tuple ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] =ConvNextConfig(
depths=lowercase , hidden_sizes=lowercase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
SCREAMING_SNAKE_CASE_: Optional[Any] =UperNetConfig(
backbone_config=lowercase , auxiliary_in_channels=lowercase , num_labels=lowercase , idalabel=lowercase , labelaid=lowercase , )
return config
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =[]
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
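# Hedged note: one representative (old, new) pair produced by the stage loop
# above for stage 0, layer 0 (copied from the f-strings, not executed):
#
#     ("backbone.stages.0.0.gamma",
#      "backbone.encoder.stages.0.layers.0.layer_scale_parameter")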
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =dct.pop(lowercase )
SCREAMING_SNAKE_CASE_: int =val
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str ={
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
SCREAMING_SNAKE_CASE_: Tuple =model_name_to_url[model_name]
SCREAMING_SNAKE_CASE_: str =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""state_dict"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =get_upernet_config(lowercase )
SCREAMING_SNAKE_CASE_: str =UperNetForSemanticSegmentation(lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Any =state_dict.pop(lowercase )
if "bn" in key:
SCREAMING_SNAKE_CASE_: Dict =key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE_: Dict =val
# rename keys
SCREAMING_SNAKE_CASE_: Dict =create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
model.load_state_dict(lowercase )
# verify on image
SCREAMING_SNAKE_CASE_: Optional[Any] ="""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE_: str =Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE_: List[str] =SegformerImageProcessor()
SCREAMING_SNAKE_CASE_: List[str] =processor(lowercase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Tuple =model(lowercase )
if model_name == "upernet-convnext-tiny":
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
SCREAMING_SNAKE_CASE_: str =torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
SCREAMING_SNAKE_CASE_: str =torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__ ( lowercase ):
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , lowercase , )
if isinstance(lowercase , torch.Tensor ):
return image
elif isinstance(lowercase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =[image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =image[0].size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =(x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_: Dict =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_: Optional[Any] =np.concatenate(lowercase , axis=0 )
SCREAMING_SNAKE_CASE_: Dict =np.array(lowercase ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE_: Union[str, Any] =image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE_: List[Any] =2.0 * image - 1.0
SCREAMING_SNAKE_CASE_: Tuple =torch.from_numpy(lowercase )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_: str =torch.cat(lowercase , dim=0 )
return image
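# Worked example of the normalization above (added for illustration): a pixel value
# p in [0, 255] is mapped to 2 * (p / 255) - 1, i.e. into [-1, 1], the value range
# diffusion UNets expect. So p = 0 -> -1.0, p = 127.5 -> 0.0, p = 255 -> 1.0.
def _demo_pixel_range() -> None:
    p = np.array([0.0, 127.5, 255.0])
    assert np.allclose(2.0 * (p / 255.0) - 1.0, [-1.0, 0.0, 1.0])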
def __magic_name__ ( lowercase ):
if isinstance(lowercase , torch.Tensor ):
return mask
elif isinstance(lowercase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_: Optional[int] =[mask]
if isinstance(mask[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =mask[0].size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_: Dict =[np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_: Dict =np.concatenate(lowercase , axis=0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =mask.astype(np.floataa ) / 255.0
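        # binarize the rescaled mask: values below 0.5 become 0 and the rest become 1
        # (upstream this is `mask[mask < 0.5] = 0; mask[mask >= 0.5] = 1`)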
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: int =1
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.from_numpy(lowercase )
elif isinstance(mask[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_: Dict =torch.cat(lowercase , dim=0 )
return mask
class a ( UpperCAmelCase__ ):
UpperCamelCase : UNetaDModel
UpperCamelCase : RePaintScheduler
def __init__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
@torch.no_grad()
def __call__( self : str , lowerCAmelCase : Union[torch.Tensor, PIL.Image.Image] , lowerCAmelCase : Union[torch.Tensor, PIL.Image.Image] , lowerCAmelCase : int = 250 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 10 , lowerCAmelCase : int = 10 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =image
SCREAMING_SNAKE_CASE_: int =_preprocess_image(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_: Union[str, Any] =_preprocess_mask(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_: List[Any] =original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE_: Any =original_image.shape
SCREAMING_SNAKE_CASE_: List[Any] =randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.device )
SCREAMING_SNAKE_CASE_: List[str] =eta
SCREAMING_SNAKE_CASE_: List[str] =self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_: List[Any] =generator[0] if isinstance(lowerCAmelCase , lowerCAmelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_: Tuple =self.unet(lowerCAmelCase , lowerCAmelCase ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_: Optional[Any] =self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_: List[Any] =self.scheduler.undo_step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =t
SCREAMING_SNAKE_CASE_: Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_: Optional[int] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_: Tuple =self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)), for j from 0 to i-1
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
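# Standalone sketch (added for illustration, independent of the obfuscated names
# above): the same bottom-up recurrence C(i) = sum_{j=0}^{i-1} C(j) * C(i-j-1),
# which yields the classic sequence 1, 1, 2, 5, 14, 42, ...
def _catalan_check(n: int = 5) -> list:
    c = [1] + [0] * n
    for i in range(1, n + 1):
        c[i] = sum(c[j] * c[i - j - 1] for j in range(i))
    return c  # [1, 1, 2, 5, 14, 42] for n = 5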
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
"""simple docstring"""
from manim import *
class a ( UpperCAmelCase__ ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE_: int =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_: List[Any] =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_: Dict =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Dict =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Optional[int] =VGroup(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE_: str =Group(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0.5 , aligned_edge=lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =[mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE_: Dict =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Dict =Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE_: str =Group(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0.5 , aligned_edge=lowerCAmelCase )
gpu.align_to(lowerCAmelCase , lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_: Tuple =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE_: List[Any] =Group(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0.5 , aligned_edge=lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCAmelCase , run_time=1 ) , Create(lowerCAmelCase , run_time=1 ) , Create(lowerCAmelCase , run_time=1 ) , )
SCREAMING_SNAKE_CASE_: List[Any] =MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE_: Any =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase , run_time=2.5 ) , Write(lowerCAmelCase ) , Write(lowerCAmelCase ) )
self.add(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =[]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
SCREAMING_SNAKE_CASE_: List[Any] =[]
for i, rect in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase , opacity=0.7 )
cpu_target.move_to(lowerCAmelCase )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_: str =0.4_6 / 4
SCREAMING_SNAKE_CASE_: Union[str, Any] =0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCAmelCase , buff=0.0 )
cpu_targs.append(lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCAmelCase ) )
second_animations.append(MoveToTarget(lowerCAmelCase , run_time=1.5 ) )
self.play(*lowerCAmelCase )
self.play(*lowerCAmelCase )
self.wait()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase )
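# Standalone sketch (added for illustration, independent of the obfuscated names
# above): splitting YAML front matter delimited by "---" lines from the body of a
# README, mirroring the logic of the function above.
def _demo_split_front_matter() -> None:
    readme = "---\nlicense: mit\n---\n# Title\nBody text"
    lines = readme.splitlines()
    sep = lines[1:].index("---") + 1
    yaml_block = "\n".join(lines[1:sep])  # "license: mit"
    body = "\n".join(lines[sep + 1 :])    # "# Title\nBody text"
    assert yaml_block == "license: mit" and body.startswith("# Title")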
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_UpperCAmelCase = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __magic_name__ ( lowercase ):
return (data["data"], data["target"])
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =XGBClassifier()
classifier.fit(lowercase , lowercase )
return classifier
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =load_iris()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =data_handling(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =train_test_split(
lowercase , lowercase , test_size=0.25 )
SCREAMING_SNAKE_CASE_: Tuple =iris["""target_names"""]
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE_: Optional[int] =xgboost(lowercase , lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase , lowercase , lowercase , display_labels=lowercase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : float , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : bool = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Tuple =nn.Embedding(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =nn.Embedding(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =False
SCREAMING_SNAKE_CASE_: Optional[Any] =nn.Dropout(p=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =TaConfig(
vocab_size=lowerCAmelCase , d_model=lowerCAmelCase , num_heads=lowerCAmelCase , d_kv=lowerCAmelCase , d_ff=lowerCAmelCase , dropout_rate=lowerCAmelCase , feed_forward_proj=lowerCAmelCase , is_decoder=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[int] =nn.ModuleList()
for lyr_num in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =TaBlock(lowerCAmelCase )
self.encoders.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =TaLayerNorm(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =nn.Dropout(p=lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.token_embedder(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =encoder_input_tokens.shape[1]
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.arange(lowerCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.dropout_pre(lowerCAmelCase )
        # invert the attention mask
SCREAMING_SNAKE_CASE_: List[Any] =encoder_input_tokens.size()
SCREAMING_SNAKE_CASE_: Dict =self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase )
for lyr in self.encoders:
SCREAMING_SNAKE_CASE_: Dict =lyr(lowerCAmelCase , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: int =self.layer_norm(lowerCAmelCase )
return self.dropout_post(lowerCAmelCase ), encoder_inputs_mask
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
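# Worked example (added for illustration): 47 is not a palindrome, but a single
# reverse-and-add step gives 47 + 74 = 121, which is a palindrome; numbers that
# never reach a palindrome within the iteration cap are counted as Lychrel.
def _demo_reverse_and_add() -> None:
    n = 47
    step = n + int(str(n)[::-1])
    assert step == 121 and str(step) == str(step)[::-1]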
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
"""simple docstring"""
_UpperCAmelCase = {}
def __magic_name__ ( lowercase , lowercase , lowercase ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
SCREAMING_SNAKE_CASE_: Optional[int] =(days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
SCREAMING_SNAKE_CASE_: int =_calculate(days - 1 , lowercase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
SCREAMING_SNAKE_CASE_: Optional[int] =_calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
SCREAMING_SNAKE_CASE_: str =_calculate(days - 1 , lowercase , 0 )
SCREAMING_SNAKE_CASE_: Dict =state_late + state_absent + state_ontime
SCREAMING_SNAKE_CASE_: Optional[Any] =prizestrings
return prizestrings
def __magic_name__ ( lowercase = 30 ):
return _calculate(lowercase , absent=0 , late=0 )
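# Standalone cross-check (added for illustration): enumerate all ternary attendance
# strings over O(n time), A(bsent), L(ate) and apply the same rules as the memoised
# recursion above (at most one absence in total, never three consecutive lates).
# Project Euler 191 gives 43 valid strings for a 4-day period.
def _brute_force_prize_strings(days: int = 4) -> int:
    from itertools import product
    def valid(s: str) -> bool:
        return s.count("A") < 2 and "LLL" not in s
    return sum(valid("".join(p)) for p in product("OAL", repeat=days))  # 43 for days = 4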
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
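# Standalone sketch of the same logic (added for illustration): merge, sort, then
# take the middle element (odd length) or the mean of the two middle ones (even).
def _demo_median() -> None:
    nums = sorted([1.0, 3.0] + [2.0, 4.0])  # [1.0, 2.0, 3.0, 4.0]
    div, mod = divmod(len(nums), 2)
    median = nums[div] if mod else (nums[div] + nums[div - 1]) / 2
    assert median == 2.5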
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__ ( lowercase ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase ):
return ext
raise Exception(
f'''Unable to determine file format from file extension {path}. '''
f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
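# Illustrative note (added for clarity): an empty path falls back to "pipe", i.e.
# reading from stdin; otherwise the file extension selects the reader, e.g.
# "data.csv" -> "csv", provided "csv" appears in PipelineDataFormat.SUPPORTED_FORMATS.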
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
SCREAMING_SNAKE_CASE_: List[str] =try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
SCREAMING_SNAKE_CASE_: Union[str, Any] =PipelineDataFormat.from_str(
format=lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowercase , lowercase )
class a ( UpperCAmelCase__ ):
def __init__( self : int , lowerCAmelCase : Pipeline , lowerCAmelCase : PipelineDataFormat ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =nlp
SCREAMING_SNAKE_CASE_: Optional[int] =reader
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : ArgumentParser ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowerCAmelCase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowerCAmelCase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowerCAmelCase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowerCAmelCase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowerCAmelCase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowerCAmelCase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowerCAmelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self._nlp, []
for entry in self._reader:
SCREAMING_SNAKE_CASE_: Dict =nlp(**lowerCAmelCase ) if self._reader.is_multi_columns else nlp(lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
outputs.append(lowerCAmelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
SCREAMING_SNAKE_CASE_: str =self._reader.save_binary(lowerCAmelCase )
logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
else:
self._reader.save(lowerCAmelCase )
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
        # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
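# Editor's note on the stage geometry asserted in the hidden-state test above:
# with a stem stride of 4 and a spatial halving after every second stage, stage i
# has side length (image_size // 4) // 2 ** (i // 2). For an illustrative
# image_size of 224 that gives 56, 56, 28, 28, 14, 14, 7, 7 across the 8 states.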
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 1
|
"""simple docstring"""
import math
import random
def __magic_name__ ( lowercase , lowercase = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
_UpperCAmelCase = 0.0_2
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(lowercase ):
# Forward propagation
SCREAMING_SNAKE_CASE_: Optional[int] =sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
SCREAMING_SNAKE_CASE_: List[Any] =(expected / 100) - layer_a
# Error delta
        SCREAMING_SNAKE_CASE_: Optional[int] =layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = int(input("""Expected value: """))
_UpperCAmelCase = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
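# Editor's note: the trainer above is style-obfuscated; below is a minimal runnable
# sketch of the same single-weight sigmoid update. The names `sigmoid` and
# `train_single_weight` are illustrative, not from the original source.
import math
import random

def sigmoid(value: float, deriv: bool = False) -> float:
    # with deriv=True, `value` is assumed to already be a sigmoid output,
    # so the derivative reduces to value * (1 - value)
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))

def train_single_weight(expected: float, steps: int, lr: float = 0.02) -> float:
    weight = float(2 * random.randint(1, 100) - 1)
    output = sigmoid(lr * weight)
    for _ in range(steps):
        output = sigmoid(lr * weight)                # forward pass
        error = (expected / 100) - output            # how far off the target we are
        delta = error * sigmoid(output, deriv=True)  # error scaled by sigmoid'(output)
        weight += lr * delta                         # gradient step on the lone weight
    return output * 100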
| 36
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
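# Editor's check: a 90 degree angle on a circle of radius 10 spans a quarter of the
# circumference, i.e. 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.
assert abs(2 * pi * 10 * (90 / 360) - 5 * pi) < 1e-12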
| 36
| 1
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Union[str, Any] = 'encodec'
def __init__( self : Optional[Any] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCAmelCase : Optional[int]=2_4000 , lowerCAmelCase : int=1 , lowerCAmelCase : Tuple=False , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : Optional[int]=128 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : str=1 , lowerCAmelCase : Optional[Any]=[8, 5, 4, 2] , lowerCAmelCase : Tuple="weight_norm" , lowerCAmelCase : int=7 , lowerCAmelCase : Tuple=7 , lowerCAmelCase : str=3 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]="reflect" , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=1.0 , lowerCAmelCase : Tuple=1024 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[int] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =target_bandwidths
SCREAMING_SNAKE_CASE_: str =sampling_rate
SCREAMING_SNAKE_CASE_: Union[str, Any] =audio_channels
SCREAMING_SNAKE_CASE_: int =normalize
SCREAMING_SNAKE_CASE_: List[str] =chunk_length_s
SCREAMING_SNAKE_CASE_: Union[str, Any] =overlap
SCREAMING_SNAKE_CASE_: List[Any] =hidden_size
SCREAMING_SNAKE_CASE_: str =num_filters
SCREAMING_SNAKE_CASE_: Dict =num_residual_layers
SCREAMING_SNAKE_CASE_: int =upsampling_ratios
SCREAMING_SNAKE_CASE_: Optional[int] =norm_type
SCREAMING_SNAKE_CASE_: List[str] =kernel_size
SCREAMING_SNAKE_CASE_: int =last_kernel_size
SCREAMING_SNAKE_CASE_: List[str] =residual_kernel_size
SCREAMING_SNAKE_CASE_: Dict =dilation_growth_rate
SCREAMING_SNAKE_CASE_: Tuple =use_causal_conv
SCREAMING_SNAKE_CASE_: Optional[Any] =pad_mode
SCREAMING_SNAKE_CASE_: List[str] =compress
SCREAMING_SNAKE_CASE_: Optional[Any] =num_lstm_layers
SCREAMING_SNAKE_CASE_: Tuple =trim_right_ratio
SCREAMING_SNAKE_CASE_: Tuple =codebook_size
SCREAMING_SNAKE_CASE_: Tuple =codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE_: int =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**lowerCAmelCase )
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
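# Editor's note: a hedged numeric check of the derived properties above, using the
# 24 kHz defaults visible in the signature (sampling_rate=24000,
# upsampling_ratios=[8, 5, 4, 2], largest target bandwidth 24.0):
#   hop length     = 8 * 5 * 4 * 2                  = 320 samples per frame
#   frame rate     = ceil(24000 / 320)              = 75 frames per second
#   num quantizers = int(1000 * 24.0 // (75 * 10))  = 32 codebooks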
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
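# Editor's note: both tests above follow the usual flax data-parallel recipe:
# replicate the params across devices, shard the batch along a leading device
# axis, and split the PRNG key per device. A minimal sketch of that pattern
# (shapes are placeholders, not the pipeline's real ones):
if is_flax_available():
    demo_params = replicate({"w": jnp.ones((3,))})  # param pytree copied to every device
    demo_batch = shard(jnp.zeros((jax.device_count(), 77), dtype=jnp.int32))
    demo_rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
    # a pmapped call then consumes one batch shard and one rng per device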
| 36
| 1
|
"""simple docstring"""
import inspect
import unittest
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE_: Any =inspect.getmembers(lowerCAmelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE_: List[Any] ="""k-diffusion"""
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE_: Optional[int] ="""invisible-watermark"""
assert backend in deps, f'''{backend} is not in the deps table!'''
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Dict , *,
lowerCAmelCase : int = 4 , lowerCAmelCase : int = 768 , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , ) -> Any:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: str =nn.Parameter(torch.zeros(lowerCAmelCase ) )
# parameters for additional clip time embeddings
SCREAMING_SNAKE_CASE_: List[str] =nn.Linear(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Linear(lowerCAmelCase , lowerCAmelCase )
# parameters for encoder hidden states
SCREAMING_SNAKE_CASE_: Tuple =clip_extra_context_tokens
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Linear(
lowerCAmelCase , self.clip_extra_context_tokens * cross_attention_dim )
SCREAMING_SNAKE_CASE_: List[str] =nn.Linear(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =nn.LayerNorm(lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , *, lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
SCREAMING_SNAKE_CASE_: Optional[int] =image_embeddings.shape[0]
SCREAMING_SNAKE_CASE_: Any =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =classifier_free_guidance_embeddings.expand(
lowerCAmelCase , -1 )
SCREAMING_SNAKE_CASE_: List[Any] =torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
SCREAMING_SNAKE_CASE_: Optional[Any] =prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
SCREAMING_SNAKE_CASE_: Dict =self.embedding_proj(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
SCREAMING_SNAKE_CASE_: Optional[Any] =self.clip_extra_context_tokens_proj(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =clip_extra_context_tokens.reshape(lowerCAmelCase , -1 , self.clip_extra_context_tokens )
SCREAMING_SNAKE_CASE_: str =clip_extra_context_tokens.permute(0 , 2 , 1 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.encoder_hidden_states_proj(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.text_encoder_hidden_states_norm(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
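# Editor's note on the tensor shapes flowing through the forward pass above, for an
# illustrative batch size B, embedding width D, T extra context tokens and
# cross-attention width C:
#   image_embeddings                 (B, D)
#   embedding projections            (B, time_embed_dim) each, summed together
#   clip_extra_context_tokens_proj   (B, T * C) -> reshape (B, C, T) -> permute (B, T, C)
#   the T projected tokens are then concatenated in front of the projected and
#   layer-normed text encoder hidden states along the token axis.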
| 36
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
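# Editor's note: the script keeps teacher layers [0, 2, 4, 7, 9, 11] and renumbers
# them 0..5 in the student. A minimal sketch of that renaming for one tensor name
# (the names below are illustrative, not taken from a real checkpoint):
# for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
#     old_key = f"bert.encoder.layer.{teacher_idx}.attention.self.query.weight"
#     new_key = f"bert.encoder.layer.{std_idx}.attention.self.query.weight"
#     compressed_sd[new_key] = state_dict[old_key]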
| 36
| 1
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a ( unittest.TestCase ):
UpperCamelCase : List[str] = JukeboxTokenizer
UpperCamelCase : Union[str, Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def lowerCamelCase__ ( self : List[Any] ) -> str:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE_: Any =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
SCREAMING_SNAKE_CASE_: Any =[
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
SCREAMING_SNAKE_CASE_: Dict =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
SCREAMING_SNAKE_CASE_: int =[
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 36
|
"""simple docstring"""
def __magic_name__ ( input_a , input_b ):
    return int((input_a, input_b).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
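# Editor's note: the tuple trick above generalizes to an n-input AND:
# int(inputs.count(0) == 0) is 1 exactly when none of the inputs is 0.
assert int((1, 1, 1).count(0) == 0) == 1 and int((1, 0, 1).count(0) == 0) == 0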
| 36
| 1
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Optional[int] = 'xlnet'
UpperCamelCase : List[str] = ['mems']
UpperCamelCase : int = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , lowerCAmelCase : Optional[int]=3_2000 , lowerCAmelCase : List[Any]=1024 , lowerCAmelCase : Dict=24 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Dict=4096 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Tuple="bi" , lowerCAmelCase : Tuple=0.0_2 , lowerCAmelCase : List[str]=1E-12 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=512 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=True , lowerCAmelCase : Tuple=False , lowerCAmelCase : Tuple=False , lowerCAmelCase : List[Any]=-1 , lowerCAmelCase : int=False , lowerCAmelCase : int="last" , lowerCAmelCase : int=True , lowerCAmelCase : Any="tanh" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : int=5 , lowerCAmelCase : int=5 , lowerCAmelCase : Dict=5 , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Union[str, Any]=2 , **lowerCAmelCase : Dict , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =vocab_size
SCREAMING_SNAKE_CASE_: Tuple =d_model
SCREAMING_SNAKE_CASE_: Optional[int] =n_layer
SCREAMING_SNAKE_CASE_: int =n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
SCREAMING_SNAKE_CASE_: Optional[int] =d_model // n_head
SCREAMING_SNAKE_CASE_: Optional[Any] =ff_activation
SCREAMING_SNAKE_CASE_: Any =d_inner
SCREAMING_SNAKE_CASE_: List[Any] =untie_r
SCREAMING_SNAKE_CASE_: Optional[Any] =attn_type
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: Dict =layer_norm_eps
SCREAMING_SNAKE_CASE_: List[Any] =dropout
SCREAMING_SNAKE_CASE_: Optional[Any] =mem_len
SCREAMING_SNAKE_CASE_: Optional[int] =reuse_len
SCREAMING_SNAKE_CASE_: Optional[int] =bi_data
SCREAMING_SNAKE_CASE_: Optional[Any] =clamp_len
SCREAMING_SNAKE_CASE_: Tuple =same_length
SCREAMING_SNAKE_CASE_: int =summary_type
SCREAMING_SNAKE_CASE_: List[str] =summary_use_proj
SCREAMING_SNAKE_CASE_: str =summary_activation
SCREAMING_SNAKE_CASE_: Tuple =summary_last_dropout
SCREAMING_SNAKE_CASE_: Optional[int] =start_n_top
SCREAMING_SNAKE_CASE_: List[Any] =end_n_top
SCREAMING_SNAKE_CASE_: int =bos_token_id
SCREAMING_SNAKE_CASE_: Any =pad_token_id
SCREAMING_SNAKE_CASE_: List[Any] =eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: List[Any] =kwargs["""use_cache"""]
SCREAMING_SNAKE_CASE_: str =use_mems_eval
SCREAMING_SNAKE_CASE_: List[str] =use_mems_train
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
@property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 36
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
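    # Editor's note: the `weight_g` / `weight_v` checkpoint keys handled above come
    # from PyTorch weight normalization, which factors a weight as g * v / ||v||.
    # A minimal demonstration of where those parameter names come from:
    from torch import nn

    demo_conv = nn.utils.weight_norm(nn.Conv1d(1, 1, kernel_size=3))
    assert sorted(name for name, _ in demo_conv.named_parameters()) == ["bias", "weight_g", "weight_v"]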
| 36
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""OwlViTFeatureExtractor"""]
_UpperCAmelCase = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
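# Editor's note: the try/except blocks above implement the optional-dependency
# pattern: names are only advertised when their backend is installed, and
# `_LazyModule` defers the real imports until first attribute access. A minimal
# sketch of that deferral (illustrative, much simpler than the real class):
# import importlib
#
# class MiniLazyModule:
#     def __init__(self, pkg_name, import_structure):
#         self._pkg = pkg_name
#         self._owner = {sym: mod for mod, syms in import_structure.items() for sym in syms}
#     def __getattr__(self, attr):
#         module = importlib.import_module(f"{self._pkg}.{self._owner[attr]}")
#         return getattr(module, attr)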
| 36
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
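    # Editor's note: the `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]` slices in
    # convert_state_dict above split a fused qkv projection back into separate
    # query/key/value tensors. A minimal check of that slicing (dim illustrative):
    demo_dim = 4
    demo_qkv = torch.arange(3 * demo_dim * demo_dim).reshape(3 * demo_dim, demo_dim)
    demo_q, demo_k, demo_v = demo_qkv[:demo_dim], demo_qkv[demo_dim : demo_dim * 2], demo_qkv[-demo_dim:]
    assert torch.equal(torch.cat([demo_q, demo_k, demo_v]), demo_qkv)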
| 36
| 1
|
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __magic_name__ ( lowercase , lowercase ):
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path / """cache"""
SCREAMING_SNAKE_CASE_: int ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_: str =SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_sql_dataset(lowercase , lowercase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path / """cache"""
SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE_: int =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_: str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_: Tuple =SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=lowercase , cache_dir=lowercase ).read()
_check_sql_dataset(lowercase , lowercase )
def __magic_name__ ( lowercase ):
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
SCREAMING_SNAKE_CASE_: Union[str, Any] =con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =tmp_path / """cache"""
SCREAMING_SNAKE_CASE_: List[Any] =os.path.join(lowercase , """tmp.sql""" )
SCREAMING_SNAKE_CASE_: Tuple =SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase ).read()
SqlDatasetWriter(lowercase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
SCREAMING_SNAKE_CASE_: Optional[Any] =iter_sql_file(lowercase )
SCREAMING_SNAKE_CASE_: str =iter_sql_file(lowercase )
for rowa, rowa in zip(lowercase , lowercase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =tmp_path / """cache"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowercase , """tmp.sql""" )
SCREAMING_SNAKE_CASE_: str =SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase ).read()
SqlDatasetWriter(lowercase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
SCREAMING_SNAKE_CASE_: List[str] =iter_sql_file(lowercase )
SCREAMING_SNAKE_CASE_: int =iter_sql_file(lowercase )
for rowa, rowa in zip(lowercase , lowercase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path / """cache"""
SCREAMING_SNAKE_CASE_: Tuple =os.path.join(lowercase , """tmp.sql""" )
SCREAMING_SNAKE_CASE_: int =SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase ).read()
with pytest.raises(lowercase ):
SqlDatasetWriter(lowercase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 36
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 1
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
    while is_sorted is False: # keep passing over the list until a full pass makes no swaps
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
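# Editor's note: a clean reference version of the same odd-even transposition sort,
# with a trace of one full pass on [3, 1, 2]: the even pass swaps indices (0, 1)
# giving [1, 3, 2], the odd pass swaps (1, 2) giving [1, 2, 3], and the next pass
# makes no swaps, so the loop stops. The name below is illustrative.
def odd_even_transposition(values: list) -> list:
    swapped = True
    while swapped:
        swapped = False
        for start in (0, 1):  # even-index pass, then odd-index pass
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    swapped = True
    return values

assert odd_even_transposition([3, 1, 2, 5, 4]) == [1, 2, 3, 4, 5]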

"""A trie (prefix tree) supporting insert, find, and delete on words."""

from __future__ import annotations


class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
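
# A minimal extension sketch (an addition, not in the original file): enumerate
# every stored word under a given prefix, reusing the TrieNode API above.
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    found: list[str] = []

    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            found.append(word)
        for key, child in node.nodes.items():
            _collect(child, word + key)

    _collect(curr, prefix)
    return found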

"""Project Euler problem 55: count Lychrel candidates below a limit.

A number is treated as Lychrel here if it does not form a palindrome within
50 iterations of the reverse-and-add process.
"""


def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
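
# Worked example (an addition for context): 47 resolves in a single step
# (47 + 74 = 121), while 196 is the classic Lychrel candidate and keeps going.
def _lychrel_examples() -> None:
    assert is_palindrome(sum_reverse(47))  # 47 + 74 = 121
    assert not is_palindrome(sum_reverse(196))  # 196 + 691 = 887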

"""Test helpers for the accelerate test suite: a synthetic regression dataset,
two tiny regression models, and mocked MRPC dataloaders."""

import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
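
# A minimal usage sketch (an assumption about how these helpers are driven,
# not part of the original file): the mocked dataloaders are built around an
# Accelerator and then passed through `prepare` with a model.
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#     train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)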

"""Lazy import structure for the DPT model, following the standard
transformers __init__.py pattern."""

from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
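
# A short note on the pattern (an addition, not part of this file): replacing
# the module in sys.modules with a _LazyModule defers the heavy submodule
# imports until an attribute is first accessed, so for example:
#
#     from transformers import DPTConfig              # cheap: config only
#     from transformers import DPTForDepthEstimation  # triggers the torch import lazily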

"""Convert a signed integer to its binary representation as a string."""


def decimal_to_binary(num: int) -> str:
    """
    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
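
# Illustrative cross-check (an addition): the result should match Python's
# built-in bin() for any integer.
def _check_against_bin() -> None:
    for value in (0, 1, 7, 200, -5, -255):
        assert decimal_to_binary(value) == bin(value)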

"""An AVL tree: a self-balancing binary search tree with O(log n) insert and delete."""

from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at ``node`` to the right and return the new root."""
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Mirror image of right_rotation."""
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(height)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Replace the node's data with its in-order successor, then delete it.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
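
# Illustrative invariant check (an addition, not in the original file): every
# node of an AVL tree must have a balance factor in {-1, 0, 1}.
def _is_balanced(node: MyNode | None) -> bool:
    if node is None:
        return True
    if abs(get_height(node.get_left()) - get_height(node.get_right())) > 1:
        return False
    return _is_balanced(node.get_left()) and _is_balanced(node.get_right())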

"""Configuration class for using timm models as backbones in transformers."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
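
# A minimal usage sketch (an assumption about typical usage, not part of the
# original file): configure a timm ResNet backbone that exposes its last two
# feature maps.
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(-2, -1))
#     assert config.use_timm_backbone and config.features_only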

"""Atbash cipher: each letter maps to its mirror in the alphabet (A<->Z, b<->y, ...)."""

import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the running times of the two implementations."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
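
# Illustrative property check (an addition): atbash is an involution, so
# applying it twice returns the original text.
def _check_involution() -> None:
    for text in ("ABCDEFGH", "123GGjj", "with space"):
        assert atbash(atbash(text)) == text
        assert atbash_slow(atbash_slow(text)) == text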

"""Pollard's rho integer factorization with Floyd cycle detection."""

from __future__ import annotations

from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
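
# A small extension sketch (not part of the original script): factor a number
# completely by applying pollard_rho recursively; values that resist every
# attempt (probable primes) are kept as single factors.
def factorize(num: int) -> list[int]:
    if num < 2:
        return []
    divisor = pollard_rho(num)
    if divisor is None:  # probably prime, or we were unlucky
        return [num]
    return sorted(factorize(divisor) + factorize(num // divisor))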
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
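
# Illustrative composition (an addition): a NAND gate is simply the negation
# of the AND gate defined above.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))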

"""Generate Catalan numbers with dynamic programming."""


def catalan_numbers(upper_limit: int) -> list[int]:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
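
# Illustrative cross-check (an addition): the DP values must match the closed
# form C(n) = binom(2n, n) / (n + 1).
def _check_closed_form(upper_limit: int = 10) -> None:
    from math import comb

    for n, c in enumerate(catalan_numbers(upper_limit)):
        assert c == comb(2 * n, n) // (n + 1)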

"""Convert BiT checkpoints from the timm library to the transformers format."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)

"""ALBERT model configuration and its ONNX export configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
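
# A minimal usage sketch (an assumption about typical usage, not part of the
# original file): instantiate a smaller ALBERT configuration and inspect the
# dynamic ONNX input axes.
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     onnx_config = AlbertOnnxConfig(config)
#     print(onnx_config.inputs)  # input_ids / attention_mask / token_type_ids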

"""Shared pytest fixtures and configuration for the SageMaker integration tests."""

import pytest
from attr import dataclass


SAGEMAKER_DEFAULT_REGION = "us-east-1"  # default region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
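
# A minimal usage sketch (an assumption, not part of the original conftest):
# a test class consumes the fixture through `usefixtures`, which attaches the
# environment to the class as `self.env`.
#
#     @pytest.mark.usefixtures("sm_env")
#     class TestSingleNodeTraining:
#         framework = "pytorch"
#
#         def test_hyperparameters(self):
#             assert self.env.hyperparameters["task_name"] == "mnli"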

"""Parse and validate the YAML metadata block at the top of a dataset README.md."""

from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load and validate the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path) -> None:
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load and validate the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
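
# A minimal usage sketch (an addition, not part of the original module):
# round-trip the metadata of a dataset card programmatically.
#
#     metadata = DatasetMetadata.from_readme(Path("README.md"))
#     metadata["pretty_name"] = "My Dataset"  # hypothetical field
#     metadata.to_readme(Path("README.md"))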
| 36
| 1
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_UpperCAmelCase = logging.getLogger(__name__)
def __magic_name__ ( lowercase=2 , lowercase=3 , lowercase=16 , lowercase = 10 , lowercase = 2 ):
def get_dataset(lowercase ):
SCREAMING_SNAKE_CASE_: Dict =torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE_: Dict =get_dataset(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =get_dataset(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
SCREAMING_SNAKE_CASE_: Optional[Any] =DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ):
SCREAMING_SNAKE_CASE_: int =[]
for epoch in range(lowercase ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =batch
SCREAMING_SNAKE_CASE_: Dict =model(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.nn.functional.mse_loss(lowercase , lowercase )
accelerator.backward(lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class a ( nn.Module ):
def __init__( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[Any] =nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_: Tuple =nn.Parameter(torch.randn(1 ) )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Any ) -> Tuple:
'''simple docstring'''
return x * self.a + self.b
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_: List[Any] =DummyModel()
SCREAMING_SNAKE_CASE_: List[Any] =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =dummy_dataloaders()
SCREAMING_SNAKE_CASE_: Union[str, Any] =ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase , automatic_checkpoint_naming=lowerCAmelCase )
# Train baseline
SCREAMING_SNAKE_CASE_: Dict =Accelerator(project_config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_: str =DummyModel()
SCREAMING_SNAKE_CASE_: List[str] =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_: Dict =Accelerator()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save initial
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowerCAmelCase , """initial""" )
accelerator.save_state(lowerCAmelCase )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Tuple =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: int =optimizer.state_dict()
SCREAMING_SNAKE_CASE_: Optional[Any] =train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): int =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: str =optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_: Tuple =DummyModel()
SCREAMING_SNAKE_CASE_: Tuple =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =dummy_dataloaders()
SCREAMING_SNAKE_CASE_: List[Any] =Accelerator()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
accelerator.load_state(lowerCAmelCase )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): int =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: Union[str, Any] =optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =train(2 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save everything
SCREAMING_SNAKE_CASE_: Any =os.path.join(lowerCAmelCase , """checkpoint""" )
accelerator.save_state(lowerCAmelCase )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase )
test_rands += train(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: List[str] =optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_: Optional[int] =DummyModel()
SCREAMING_SNAKE_CASE_: Any =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =dummy_dataloaders()
SCREAMING_SNAKE_CASE_: Optional[Any] =ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase )
# Train baseline
SCREAMING_SNAKE_CASE_: Dict =Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Optional[Any] =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: str =optimizer.state_dict()
SCREAMING_SNAKE_CASE_: Any =train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Any =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: Any =optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =DummyModel()
SCREAMING_SNAKE_CASE_: Any =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =dummy_dataloaders()
SCREAMING_SNAKE_CASE_: List[Any] =ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
accelerator.load_state(os.path.join(lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Union[str, Any] =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: Union[str, Any] =optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =train(2 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Optional[Any] =model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_: int =optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_: int =DummyModel()
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_: Optional[Any] =Accelerator()
with self.assertRaises(lowerCAmelCase ) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_: List[str] =DummyModel()
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE_: int =torch.optim.lr_scheduler.StepLR(lowerCAmelCase , step_size=1 , gamma=0.9_9 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =dummy_dataloaders()
SCREAMING_SNAKE_CASE_: Tuple =ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase )
# Train baseline
SCREAMING_SNAKE_CASE_: Dict =Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_: List[Any] =scheduler.state_dict()
train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertNotEqual(lowerCAmelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(lowerCAmelCase , scheduler.state_dict() )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_: Any =DummyModel()
SCREAMING_SNAKE_CASE_: Optional[int] =ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_: Union[str, Any] =Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =accelerator.prepare(lowerCAmelCase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
_UpperCAmelCase = """/tmp/accelerate/state_checkpointing"""
_UpperCAmelCase = DummyModel()
_UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
_UpperCAmelCase, _UpperCAmelCase = dummy_dataloaders()
_UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_UpperCAmelCase, _UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_UpperCAmelCase = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
_UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
_UpperCAmelCase = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
_UpperCAmelCase = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
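# --- Illustrative sketch (not part of the original test file) ---
# A minimal, hypothetical example of the save/load pattern exercised above,
# using the public `accelerate` API (Accelerator.save_state / load_state with
# automatic checkpoint naming):
#
#     from accelerate import Accelerator
#     from accelerate.utils import ProjectConfiguration
#
#     project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
#     accelerator = Accelerator(project_dir="runs", project_config=project_config)
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state()                                  # -> runs/checkpoints/checkpoint_0
#     accelerator.load_state("runs/checkpoints/checkpoint_0")
#
# With total_limit=2, only the two most recent checkpoint_* directories are kept.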
| 36
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the scikit-learn Bunch into feature matrix and target vector.
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier, evaluated on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
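# --- Illustrative sketch (not part of the original script) ---
# A trained classifier can also score unseen rows; a hypothetical continuation
# using the helpers defined above:
#
#     iris = load_iris()
#     features, targets = data_handling(iris)
#     clf = xgboost(features, targets)
#     print(clf.predict(features[:2]))  # e.g. [0 0] -- the first two rows are setosa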
| 36
| 1
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( lowercase ):
def wrapper(*lowercase , **lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =timeit.default_timer()
SCREAMING_SNAKE_CASE_: Tuple =func(*lowercase , **lowercase )
SCREAMING_SNAKE_CASE_: str =timeit.default_timer() - starttime
return delta
SCREAMING_SNAKE_CASE_: str =func.__name__
return wrapper
def __magic_name__ ( lowercase , lowercase=100 , lowercase=None ):
SCREAMING_SNAKE_CASE_: int =[]
SCREAMING_SNAKE_CASE_: Union[str, Any] =seq_shapes or {}
for i in range(lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] ={}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowercase , _ArrayXD ):
SCREAMING_SNAKE_CASE_: Dict =np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowercase , datasets.Value ):
if v.dtype == "string":
SCREAMING_SNAKE_CASE_: int ="""The small grey turtle was surprisingly fast when challenged."""
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowercase , datasets.Sequence ):
while isinstance(lowercase , datasets.Sequence ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =v.feature
SCREAMING_SNAKE_CASE_: Optional[int] =seq_shapes[k]
SCREAMING_SNAKE_CASE_: Any =np.random.rand(*lowercase ).astype(v.dtype )
SCREAMING_SNAKE_CASE_: Tuple =data
dummy_data.append((i, example) )
return dummy_data
def __magic_name__ ( lowercase , lowercase , lowercase=100 , lowercase=None ):
SCREAMING_SNAKE_CASE_: Any =generate_examples(lowercase , num_examples=lowercase , seq_shapes=lowercase )
with ArrowWriter(features=lowercase , path=lowercase ) as writer:
for key, record in dummy_data:
SCREAMING_SNAKE_CASE_: Tuple =features.encode_example(lowercase )
writer.write(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
SCREAMING_SNAKE_CASE_: Tuple =datasets.Dataset.from_file(filename=lowercase , info=datasets.DatasetInfo(features=lowercase ) )
return dataset
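# --- Illustrative sketch (not part of the original benchmark) ---
# A hypothetical call to the helper above, assuming the last function is named
# generate_example_dataset(dataset_path, features, num_examples, seq_shapes):
#
#     features = datasets.Features(
#         {"text": datasets.Value("string"), "label": datasets.Value("int32")}
#     )
#     ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#     assert len(ds) == 100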
| 36
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
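# --- Illustrative sketch (hypothetical, not part of the original file) ---
# The first function above builds per-residue atom14 <-> atom37 index and mask
# tensors from amino-acid types. A minimal call, assuming the OpenFold-style
# convention of a feature dict keyed by an integer "aatype" tensor:
#
#     protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # 8 alanine residues
#     protein = make_atomaa_masks(protein)  # name taken from the call above
#     # the returned dict now also carries the index/mask tensors assembled above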
| 36
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
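# --- Illustrative sketch (not part of the original file) ---
# The two classes above mirror the standard `transformers` AlbertConfig /
# AlbertOnnxConfig pair. A hypothetical instantiation, assuming that API:
#
#     config = AlbertConfig(vocab_size=30000, embedding_size=128, hidden_size=4096)
#     onnx_config = AlbertOnnxConfig(config, task="default")
#     print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask / token_type_ids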
| 36
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    # Merge, sort, and take the middle element (or the mean of the two middles).
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 36
| 1
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_UpperCAmelCase = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
SCREAMING_SNAKE_CASE_: Any =self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ="""src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE_: List[Any] =comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
SCREAMING_SNAKE_CASE_: Any =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
SCREAMING_SNAKE_CASE_: Tuple =black.format_str(lowerCAmelCase , mode=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =os.path.join(self.diffusers_dir , """new_code.py""" )
with open(lowerCAmelCase , """w""" , newline="""\n""" ) as f:
f.write(lowerCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase )
with open(lowerCAmelCase , """r""" ) as f:
self.assertTrue(f.read() , lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , lowerCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , lowerCAmelCase ) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE_: Dict ="""TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , lowerCAmelCase , lowerCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , lowerCAmelCase , overwrite_result=re.sub("""DDPM""" , """Test""" , lowerCAmelCase ) , )
| 36
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 1
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
SCREAMING_SNAKE_CASE_: Optional[Any] =cst_fwd.get(lowercase , np.inf )
SCREAMING_SNAKE_CASE_: Optional[Any] =cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
SCREAMING_SNAKE_CASE_: List[str] =new_cost_f
SCREAMING_SNAKE_CASE_: Any =v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
SCREAMING_SNAKE_CASE_: Union[str, Any] =cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =-1
SCREAMING_SNAKE_CASE_: List[str] =set()
SCREAMING_SNAKE_CASE_: List[Any] =set()
SCREAMING_SNAKE_CASE_: int ={source: 0}
SCREAMING_SNAKE_CASE_: List[str] ={destination: 0}
SCREAMING_SNAKE_CASE_: Dict ={source: None}
SCREAMING_SNAKE_CASE_: int ={destination: None}
SCREAMING_SNAKE_CASE_: PriorityQueue[Any] =PriorityQueue()
SCREAMING_SNAKE_CASE_: PriorityQueue[Any] =PriorityQueue()
SCREAMING_SNAKE_CASE_: Tuple =np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =queue_forward.get()
visited_forward.add(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =queue_backward.get()
visited_backward.add(lowercase )
SCREAMING_SNAKE_CASE_: Dict =pass_and_relaxation(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
SCREAMING_SNAKE_CASE_: Tuple =pass_and_relaxation(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
SCREAMING_SNAKE_CASE_: List[str] =shortest_distance
return shortest_path_distance
_UpperCAmelCase = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
_UpperCAmelCase = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
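# --- Worked example (illustrative, not part of the original file) ---
# With the forward/backward graphs defined above, the shortest E -> F route is
# E -> G (cost 2) -> F (cost 1), total 3, beating E -> B -> C -> D -> F (cost 4).
# Assuming the second function above is the entry point, and with hypothetical
# names for the two graph dicts:
#
#     bidirectional_search("E", "F", graph_fwd, graph_bwd)  # -> 3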
| 36
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    # Fraction (angle / 360) of the full circumference 2 * pi * radius.
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
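# Worked example: a 90-degree arc of a circle of radius 10 spans a quarter of
# the circumference: 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079.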
| 36
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
_UpperCAmelCase = logging.getLogger(__name__)
_UpperCAmelCase = {"""facebook/bart-base""": BartForConditionalGeneration}
_UpperCAmelCase = {"""facebook/bart-base""": BartTokenizer}
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=lowercase , default=lowercase , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=lowercase , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=lowercase , default=lowercase , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase , )
parser.add_argument(
"""--config_name""" , type=lowercase , default=lowercase , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=lowercase , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=lowercase , default=lowercase , help="""Where to store the final ONNX file.""" )
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
return args
def __magic_name__ ( lowercase , lowercase="cpu" ):
SCREAMING_SNAKE_CASE_: Optional[int] =model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
SCREAMING_SNAKE_CASE_: Any =0
SCREAMING_SNAKE_CASE_: List[Any] =None
SCREAMING_SNAKE_CASE_: Any =0
return huggingface_model, tokenizer
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: int =torch.jit.script(BARTBeamSearchGenerator(lowercase ) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] ="""My friends are cool but they eat too many carbs."""
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=lowercase , max_length=lowercase , early_stopping=lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=lowercase , )
logger.info("""Model exported to {}""".format(lowercase ) )
SCREAMING_SNAKE_CASE_: Tuple =remove_dup_initializers(os.path.abspath(lowercase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(lowercase ) )
SCREAMING_SNAKE_CASE_: Any =onnxruntime.InferenceSession(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =ort_sess.run(
lowercase , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(lowercase ),
"""max_length""": np.array(lowercase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Tuple =parse_args()
SCREAMING_SNAKE_CASE_: Optional[int] =5
SCREAMING_SNAKE_CASE_: List[str] =4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE_: Dict =torch.device(args.device )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =load_model_tokenizer(args.model_name_or_path , lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(lowercase )
if args.max_length:
SCREAMING_SNAKE_CASE_: str =args.max_length
if args.num_beams:
SCREAMING_SNAKE_CASE_: List[str] =args.num_beams
if args.output_file_path:
SCREAMING_SNAKE_CASE_: List[Any] =args.output_file_path
else:
SCREAMING_SNAKE_CASE_: int ="""BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(lowercase , lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
main()
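# --- Illustrative usage (hypothetical file name, not from the original) ---
# Assuming this script is saved as run_onnx_exporter.py:
#
#     python run_onnx_exporter.py \
#         --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 \
#         --output_file_path bart_beam_search.onnx \
#         --device cpu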
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
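# --- Illustrative sketch (not part of the original tests) ---
# The replicate/shard calls above are the standard data-parallel recipe for
# Flax pipelines: weights are replicated across devices, inputs are sharded,
# and one PRNG key is split per device. A minimal, hypothetical outline:
#
#     params = replicate(params)        # copy pipeline weights to every device
#     prompt_ids = shard(prompt_ids)    # split the batch across devices
#     rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#     images = sd_pipe(prompt_ids, params, rngs, num_inference_steps=25, jit=True)[0]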
| 36
| 1
|
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 36
|
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    # Sieve of Eratosthenes: 0 means "still considered prime", 1 means composite.
    primality_list = [0 for _ in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
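# --- Illustrative usage (hypothetical file name, not from the original) ---
# Assuming this script is saved as extract_distilbert.py:
#
#     python extract_distilbert.py \
#         --model_type bert --model_name bert-base-uncased \
#         --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#         --vocab_transform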
| 36
| 1
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention of a layer. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
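# Example invocation (added commentary; every path below is a placeholder, and the
# script name assumes this file is saved as convert_umt5_checkpoint_to_pytorch.py):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention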
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Returns 1 only if both inputs are 1, otherwise 0."""
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
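# Follow-up sketch (added, not part of the original module): other gates can be
# derived from and_gate; for example, a NAND gate is simply the negated AND.
def nand_gate(input_1: int, input_2: int) -> int:
    """Returns 0 only if both inputs are 1, else 1 (the negation of and_gate)."""
    return int(not and_gate(input_1, input_2))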
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" not in name:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass  # attention mask buffers do not need to be transferred
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
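# What the qkv split above does (added commentary): the original Swin checkpoint
# stores query/key/value as one fused matrix of shape (3 * dim, dim), while the
# Transformers port keeps three separate projections. A tiny demo with dim=2:
#
#   val = torch.arange(12.0).reshape(6, 2)   # fused (3 * dim, dim) weight
#   q, k, v = val[:2, :], val[2:4, :], val[-2:, :]
#   assert q.shape == k.shape == v.shape == (2, 2)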
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
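# SimMIM-style sanity-check sketch (added commentary; the random patch mask is an
# illustration, and the output attribute names assume a recent transformers
# version): SwinForMaskedImageModeling also accepts a boolean patch mask and then
# returns a masked-image-modeling loss.
#
#   num_patches = (model.config.image_size // model.config.patch_size) ** 2
#   bool_masked_pos = torch.randint(0, 2, (1, num_patches)).bool()
#   out = model(**inputs, bool_masked_pos=bool_masked_pos)
#   print(out.loss, out.reconstruction.shape)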
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
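# Vocoder inference sketch (added commentary; the frame count is arbitrary, and
# the input width comes from config.model_in_dim, 80 mel bins by default):
#
#   spectrogram = torch.randn(100, model.config.model_in_dim)  # (frames, mel bins)
#   with torch.no_grad():
#       waveform = model(spectrogram)  # 1-D tensor of audio samples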
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = """main"""
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = """aaaaaaa"""
# This commit does not exist, so we should 404.
PINNED_SHA1 = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")
@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels detects labels through inheritance, so subclasses keep their parent's labels
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
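# What find_labels inspects (added commentary): it reads the signature of the
# model's forward (or call) method and returns the names of its label arguments,
# which is why the dummy subclasses above inherit the answer of their parents.
#
#   >>> find_labels(BertForSequenceClassification)
#   ['labels']
#   >>> find_labels(BertForQuestionAnswering)
#   ['start_positions', 'end_positions']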
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
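# Quick sanity-check sketch (added commentary): ViTMAEForPreTraining masks a
# random 75% of patches by default, which is why the conversion above seeds the
# RNG before the forward pass; the output exposes both the loss and the mask.
#
#   outputs = model(**inputs)
#   print(outputs.loss)        # pixel reconstruction loss on masked patches
#   print(outputs.mask.shape)  # (batch, num_patches); 1 marks a masked patch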
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
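# Shape sketch (added commentary, with made-up sizes): for checkpoint_version >= 2.0
# the fused QKV weight is stored as [num_heads * num_splits * hidden_size, :]; the
# function views it as (num_heads, num_splits, hidden_size, ...), swaps heads and
# splits, and flattens back to [num_splits * num_heads * hidden_size, :], the layout
# that the conversion below then transposes for GPT-2's Conv1D:
#
#   param = torch.randn(2 * 3 * 4, 8)  # num_heads=2, num_splits=3, hidden_size=4
#   out = fix_query_key_value_ordering(param, 2.0, 3, 2, 4)
#   assert out.shape == param.shape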
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)"
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model."
    )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
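# Key behaviour exercised above (added commentary): gather_for_metrics gathers the
# per-process tensors and drops the samples that were duplicated to pad the last
# batch, so len(logits) equals the true dataset length. A plain accelerator.gather()
# on 5 samples across 2 processes with batch_size=3 would instead return 6 rows
# (5 real + 1 duplicated).
#
#   # inside generate_predictions above:
#   logit, target = accelerator.gather_for_metrics((logit, target))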
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputting the elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
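# Worked example (added commentary): odd_even_sort([5, 3, 8, 1]) proceeds as
#   even pass: [3, 5, 1, 8]    odd pass: [3, 1, 5, 8]
#   even pass: [1, 3, 5, 8]    odd pass: no swaps
#   final sweep: no swaps, so is_sorted stays True and the loop exits
# Worst case is O(n^2) like bubble sort, but the even and odd half-passes touch
# disjoint pairs, which is what makes the algorithm easy to parallelize.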
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
    ("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
    ("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
    ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
    ("""JH AH TH KH QH""", 23),
    ("""JH 9H TH KH QH""", 22),
    ("""JC KH JS JD JH""", 21),
    ("""KH KC 3S 3H 3D""", 20),
    ("""8C 9C 5C 3C TC""", 19),
    ("""JS QS 9H TS KH""", 18),
    ("""7C 7S KH 2H 7H""", 17),
    ("""3C KH 5D 5S KH""", 16),
    ("""QH 8H KD JH 8S""", 15),
    ("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A five-high straight (A-2-3-4-5) must sort below a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls to _is_five_high_straight should keep returning True and
    # must not mutate the hand's card values after the first call.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: count player one's wins over the hands
    # listed in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
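

# The tests above exercise a PokerHand class imported earlier in this file
# (the Project Euler problem 54 solution). A minimal sketch of the interface
# they rely on; method and attribute names are taken from the calls above,
# and the bodies here are placeholders rather than the real implementation:
#
#     class PokerHand:
#         _hand_type: int            # ranking category; larger beats smaller
#         _card_values: list[int]    # card values, ace counted low in A-2-3-4-5
#
#         def __init__(self, hand: str) -> None: ...
#         def _is_flush(self) -> bool: ...
#         def _is_straight(self) -> bool: ...
#         def _is_five_high_straight(self) -> bool: ...
#         def _is_same_kind(self) -> int: ...
#         def compare_with(self, other: "PokerHand") -> str:
#             """Return "Win", "Loss" or "Tie" from self's perspective."""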
"""simple docstring"""
def is_palindrome(number: int) -> bool:
    return str(number) == str(number)[::-1]


def sum_reverse(number: int) -> int:
    return int(number) + int(str(number)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the numbers below `limit` that do not produce a palindrome
    within 50 reverse-and-add iterations (Lychrel candidates)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_sum = num
        while iterations < 50:
            a_sum = sum_reverse(a_sum)
            iterations += 1
            if is_palindrome(a_sum):
                break
        else:
            # No palindrome within 50 iterations: count as a Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of both arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2
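

# Quick sanity examples (added for illustration; not in the original snippet):
assert median_of_two_arrays([1, 3], [2]) == 2         # odd total: middle element
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5    # even total: mean of middle two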
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
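

# Usage sketch (added; illustrative only, assuming this file is the DPT
# package __init__ inside transformers). The _LazyModule registered above
# defers the heavy submodule imports until an attribute is first accessed:
#
#     from transformers import DPTConfig   # does not import torch yet
#     config = DPTConfig()                 # resolves configuration_dpt on demand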