"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
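
# Illustrative sketch: preprocess_function above feeds premise/hypothesis to the tokenizer as a
# sentence pair, so each XNLI example becomes a single "[CLS] premise [SEP] hypothesis [SEP]"
# sequence. A minimal standalone check, assuming the public bert-base-multilingual-cased checkpoint:
def _xnli_pair_tokenization_sketch() -> None:
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
    encoded = tokenizer("The cat sleeps.", "An animal is resting.", truncation=True, max_length=128)
    # token_type_ids distinguish premise tokens (0) from hypothesis tokens (1)
    print(tokenizer.decode(encoded["input_ids"]))
    print(encoded["token_type_ids"])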
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new) -> None:
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False) -> None:
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
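
# Illustrative sketch: PyTorch's nn.MultiheadAttention keeps q/k/v as one fused (3*d, d)
# in_proj matrix, while the HF model uses three separate (d, d) projections, so read_in_q_k_v
# slices the fused tensor into thirds (d = 256 here). The same split on dummy tensors:
def _qkv_split_sketch() -> None:
    d = 256
    in_proj_weight = torch.randn(3 * d, d)
    q_w = in_proj_weight[:d, :]
    k_w = in_proj_weight[d : 2 * d, :]
    v_w = in_proj_weight[-d:, :]
    # the three slices tile the fused matrix exactly
    assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)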
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path) -> None:
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
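
# Illustrative check: trial division runs in O(sqrt(n)); once i * i exceeds the remaining n,
# that remainder is itself prime and is appended last.
def _prime_factors_sketch() -> None:
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(97) == [97]  # a prime comes back as itself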
"""simple docstring"""
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
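
# Illustrative check: the randomised pivot makes the O(n^2) worst case unlikely on sorted or
# adversarial inputs (expected O(n log n)). Note the right bound is exclusive, so
# quick_sort_random(a, 0, len(a)) sorts the whole list in place.
def _quick_sort_sketch() -> None:
    data = [5, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(data, 0, len(data))
    assert data == sorted([5, 1, 4, 1, 5, 9, 2, 6])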
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
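
# Illustrative sketch: end to end, the extractor pads or truncates audio to 30 s
# (n_samples = 480000 at 16 kHz) and maps it to an 80-bin log-mel spectrogram with
# n_samples / hop_length = 3000 frames. One second of synthetic audio:
def _whisper_features_sketch() -> None:
    extractor = WhisperFeatureExtractor()
    waveform = np.sin(2 * np.pi * 440.0 * np.arange(16_000) / 16_000).astype(np.float32)  # 1 s of 440 Hz
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000)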
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False) -> list:
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False) -> None:
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict) -> None:
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict) -> None:
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new) -> None:
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path) -> None:
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
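
# Illustrative check: sorting the concatenation costs O((m + n) log(m + n)); divmod picks the
# middle element for an odd total length and the mean of the two middle elements for an even one.
def _median_sketch() -> None:
    assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # odd count: middle element
    assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even count: mean of middle two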
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
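
# Illustrative check: each recursive call halves the search space, giving O(log n) comparisons
# on a *sorted* list; the slicing, however, copies, so this variant uses O(n) extra memory.
def _binary_search_sketch() -> None:
    assert binary_search([1, 2, 4, 8, 16], 8) is True
    assert binary_search([1, 2, 4, 8, 16], 3) is False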
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
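
# Illustrative sketch: the composite config can be built from already-constructed sub-configs;
# note that BlipConfig.__init__ overwrites the text model's encoder_hidden_size with the vision
# model's hidden_size so that cross-attention shapes line up.
def _blip_config_sketch() -> None:
    text_config = BlipTextConfig()
    vision_config = BlipVisionConfig(hidden_size=1024)
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    assert config.text_config.encoder_hidden_size == 1024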
"""simple docstring"""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
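
# Illustrative sketch: with the _LazyModule pattern above, importing the config class pulls in
# no torch-dependent code; model classes are only resolved at attribute access time.
def _lazy_import_sketch() -> None:
    from transformers.models.informer import InformerConfig

    config = InformerConfig()
    print(type(config).__name__)  # "InformerConfig"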
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    # Return True if the sink is reachable from the source through unsaturated edges.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
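
# Illustrative check: because the augmenting path is found with BFS, this is the Edmonds-Karp
# variant of Ford-Fulkerson, with O(V * E^2) worst-case time. The sample network above is the
# classic CLRS flow network, whose maximum flow is 23 (ford_fulkerson mutates its input into
# the residual graph, so the sketch builds a fresh capacity matrix).
def _max_flow_sketch() -> None:
    capacities = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    assert ford_fulkerson(capacities, 0, 5) == 23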
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9 | 621 | 1 |
"""simple docstring"""
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count | 621 |
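# --- Added usage sketch (my addition; names as defined above) ---
# With 8-directional connectivity, diagonally touching cells form one island.
if __name__ == "__main__":
    matrix = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(5, 5, matrix)
    print(g.count_islands())  # 5 islands for this classic example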
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 621 | 1 |
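# --- Added sanity check (my addition) ---
# The defining property of the inverse is (a * inv) % m == 1; on Python 3.8+
# the built-in pow(a, -1, m) computes the same value.
if __name__ == "__main__":
    a, m = 7, 26
    inv = find_mod_inverse(a, m)
    assert (a * inv) % m == 1
    assert inv == pow(a, -1, m)
    print(inv)  # 15, since 7 * 15 = 105 = 4 * 26 + 1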
"""simple docstring"""
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors) | 621 |
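# --- Added usage sketch (my addition; the checkpoint name is illustrative) ---
# The processor routes `audio=` to the feature extractor and `text=` to the
# tokenizer, exactly as in __call__ above.
if __name__ == "__main__":
    import numpy as np

    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
    print(inputs.input_features.shape)  # expected (1, 80, 3000): Whisper's log-mel features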
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator, dataset_size: int, batch_size: int, process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main() | 621 | 1 |
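# --- Added note (my addition) ---
# The assertion in create_accelerator expects exactly two processes, so this
# script is meant to be launched with something like:
#
#   accelerate launch --num_processes 2 test_even_batches.py
#
# (the file name is illustrative; any two-process launch of this module works).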
"""simple docstring"""
import math


def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }") | 621 |
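# --- Added worked example (my addition) ---
# For n = 10: (1 + 2 + ... + 10)**2 = 55**2 = 3025 and
# 1**2 + 2**2 + ... + 10**2 = 385, so the difference is 2640.
assert solution(10) == 3025 - 385 == 2640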
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in subclasses
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}") | 621 | 1 |
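# --- Added note (my addition) ---
# For the 4-vertex sample graph the only route from vertex 0 to vertex 3 is
# 0 -> 1 -> 2 -> 3, so the script should print "maximum flow is 6" (the
# bottleneck min(7, 6, 8); the 9 on edge 3 -> 0 points back into the source
# and never carries flow).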
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 621 |
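# --- Added doctest-style checks (my addition) ---
assert hamming_distance("python", "python") == 0
assert hamming_distance("karolin", "kathrin") == 3  # classic textbook example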
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
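# --- Added illustration (my addition; class and module names are hypothetical) ---
# Why the indirection above matters: at import time only `_import_structure` is
# built; the heavy torch/TF/flax submodules are imported on first attribute
# access. A stripped-down version of the same idea:
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map e.g. "RoFormerModel" -> "modeling_roformer"
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)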
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset,
    )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False,
    )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16) | 621 |
"""simple docstring"""
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 621 | 1 |
"""simple docstring"""
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 621 |
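# --- Added context (my note) ---
# This is the Project Euler problem 58 setup: walk the diagonals of an
# Ulam-style number spiral and return the first odd side length for which
# fewer than `ratio` of the diagonal values are prime. For the default ratio
# of 0.1 the accepted answer is reported as 26241, e.g.:
#
#   >>> solution()
#   26241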
"""simple docstring"""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 1 |
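# --- Added example invocation (my addition; the script name and checkpoint
# paths are placeholders) ---
#
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned --mbart_50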
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2 | 621 |
"""simple docstring"""
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 621 | 1 |
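# --- Added worked example (my addition) ---
# With alpha = 1.0 the ELU leaves positives unchanged, maps 0 to 0, and
# squashes negatives toward -alpha:
print(exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0))
# expected approximately [-0.6321  0.      2.    ]  (exp(-1) - 1 ~ -0.6321)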
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_A = dict(zip(a__ , range(len(a__ ) ) ) )
_A = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a__ ) )
def a_ ( self : str , **a__ : str ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : str , **a__ : Dict ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : int , a__ : int ) -> Tuple:
'''simple docstring'''
_A = "lower newer"
_A = "lower newer"
return input_text, output_text
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = "lower newer"
_A = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_A = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_A = tokens + [tokenizer.unk_token]
_A = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer(add_prefix_space=a__ )
_A = "lower newer"
# Testing tokenization
_A = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_A = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_A = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_A = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_A = self.get_rust_tokenizer(add_prefix_space=a__ )
_A = tokenizer.encode(a__ , add_prefix_space=a__ )
_A = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_A = tokens + [rust_tokenizer.unk_token]
_A = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def a_ ( self : List[str] , *a__ : Any , **a__ : List[Any] ) -> List[str]:
'''simple docstring'''
pass
def a_ ( self : Any , a__ : List[str]=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_A = "This is a simple input"
_A = ["This is a simple input 1", "This is a simple input 2"]
_A = ("This is a simple input", "This is a pair")
_A = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="max_length" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="max_length" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="max_length" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="max_length" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="max_length" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="max_length" , )
def a_ ( self : List[str] ) -> Any:
'''simple docstring'''
tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_A = "This is a simple input"
_A = ["This is a simple input looooooooong", "This is a simple input"]
_A = ("This is a simple input", "This is a pair")
_A = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_A = tokenizer.pad_token_id
_A = tokenizer(a__ , padding="max_length" , max_length=30 , return_tensors="np" )
_A = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="np" )
_A = tokenizer(*a__ , padding="max_length" , max_length=60 , return_tensors="np" )
_A = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def a_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
bos_token = "$$$"
tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
_A = "This is a simple input"
_A = ["This is a simple input 1", "This is a simple input 2"]
_A = tokenizer.bos_token_id
_A = tokenizer(a__ )
_A = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_A = tokenizer.decode(out_s.input_ids )
_A = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )]
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_A = "Encode this."
_A = "This one too please."
_A = tokenizer.encode(a__ , add_special_tokens=a__ )
encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ )
_A = tokenizer.encode_plus(
a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , )
_A = encoded_sequence_dict["input_ids"]
_A = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(a__ ) , len(a__ ) )
_A = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ )
]
_A = [x for x in filtered_sequence if x is not None]
self.assertEqual(a__ , a__ )
@require_tokenizers
class snake_case ( unittest.TestCase):
def a_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a__ )
_A = "A photo of a cat"
_A = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("test_opt" )
_A = AutoTokenizer.from_pretrained("./test_opt" )
_A = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=a__ )
_A = "A photo of a cat"
_A = tokenizer.encode(
a__ , )
# Same as above
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a__ )
_A = "bos"
_A = tokenizer.get_vocab()["bos"]
_A = "A photo of a cat"
_A = tokenizer.encode(
a__ , )
# We changed the bos token
self.assertEqual(a__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("./tok" )
_A = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_A = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) | 621 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        '''simple docstring'''
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)
    def _tokenize(self, text, **kwargs) -> List[str]:
        '''simple docstring'''
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)
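    # Illustrative example (not part of the original file): sentinel tokens occupy the top of
    # the vocabulary, so with vocab_size == 32100 and 100 extra ids,
    #   _convert_token_to_id("<extra_id_0>")   ->  32099
    #   _convert_token_to_id("<extra_id_99>")  ->  32000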
    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 621 | 1 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'})
    def __post_init__(self) -> None:
        '''simple docstring'''
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ) -> None:
        '''simple docstring'''
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
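    # Note (added for clarity, not part of the original file): features are cached per
    # (split, tokenizer class, max_seq_length, task), so changing any of these re-triggers
    # feature creation unless the cached file is overwritten via overwrite_cache.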
    def __getitem__(self, i) -> InputFeatures:
        '''simple docstring'''
        return self.features[i]
    def get_labels(self) -> List[str]:
        '''simple docstring'''
        return self.label_list | 621 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args) -> None:
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""")
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 1 |
"""simple docstring"""
import math
def jump_search(arr, x) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
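# Worked example (added for illustration, not part of the original script): for
# arr = [0, 1, 2, 4, 8, 16, 32] and x = 8, the block size is floor(sqrt(7)) == 2, so the
# search probes arr[1], arr[3], then arr[5] (16 >= 8), and finally scans linearly from
# index 4 and returns 4. Jump search requires a sorted array and runs in O(sqrt(n)).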
if __name__ == "__main__":
a_ = input("Enter numbers separated by a comma:\n").strip()
a_ = [int(item) for item in user_input.split(",")]
a_ = int(input("Enter the number to be searched:\n"))
a_ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''') | 621 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f'''{prefix}.{param_name}'''] = state_dict[f'''{prefix}.{param_name}''']
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f'''{prefix}.embeddings.{w}.weight'''
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f'''{prefix}.embeddings.LayerNorm.{w}'''
            compressed_sd[param_name] = state_dict[param_name]
    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                    ]
            compressed_sd[f'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f'''{layer}'''] = state_dict[f'''{layer}''']
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f'''lm_head.dense.{w}'''] = state_dict[f'''lm_head.dense.{w}''']
                compressed_sd[f'''lm_head.layer_norm.{w}'''] = state_dict[f'''lm_head.layer_norm.{w}''']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f'''{prefix}.ln_f.{w}'''] = state_dict[f'''{prefix}.ln_f.{w}''']
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop: float = 0.01, max_length: int = 1000) -> None:
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        '''simple docstring'''
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
    def test_batch_sampler_with_varying_batch_size(self) -> None:
        '''simple docstring'''
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
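    # Note (added for clarity, not part of the original test): with even_batches=False the
    # shards simply take batches round-robin (shard 0 gets batches 0, 2, 4; shard 1 gets
    # batches 1 and 3), so shards can end up with different numbers of batches.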
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        '''simple docstring'''
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self) -> None:
        '''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self) -> None:
        '''simple docstring'''
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self) -> None:
        '''simple docstring'''
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self) -> None:
        '''simple docstring'''
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self) -> None:
        '''simple docstring'''
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self) -> None:
        '''simple docstring'''
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3) | 621 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = KandinskyVaaPriorPipeline
__UpperCamelCase = ['prompt']
__UpperCamelCase = ['prompt', 'negative_prompt']
__UpperCamelCase = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__UpperCamelCase = False
@property
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return 32
@property
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
return 32
@property
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def a_ ( self : Dict ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
return 1_00
@property
def a_ ( self : List[str] ) -> Any:
'''simple docstring'''
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def a_ ( self : Dict ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a__ )
@property
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_A = PriorTransformer(**a__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_A = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_A = CLIPVisionModelWithProjection(a__ )
return model
@property
def a_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_A = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=a__ , do_normalize=a__ , do_resize=a__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_24 , )
return image_processor
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.dummy_prior
_A = self.dummy_image_encoder
_A = self.dummy_text_encoder
_A = self.dummy_tokenizer
_A = self.dummy_image_processor
_A = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=a__ , clip_sample_range=1_0.0 , )
_A = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = "cpu"
_A = self.get_dummy_components()
_A = self.pipeline_class(**a__ )
_A = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = pipe(**self.get_dummy_inputs(a__ ) )
_A = output.image_embeds
_A = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_A = image[0, -10:]
_A = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_A = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_A = torch_device == "cpu"
_A = True
_A = False
self._test_inference_batch_single_identical(
test_max_difference=a__ , relax_max_difference=a__ , test_mean_pixel_difference=a__ , )
@skip_mps
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_A = torch_device == "cpu"
_A = False
self._test_attention_slicing_forward_pass(
test_max_difference=a__ , test_mean_pixel_difference=a__ , ) | 621 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            generator = generator.manual_seed(0 )
            new_image = pipe.dual_guided(
                prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        image = pipe.image_variation(init_image , generator=generator , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 621 | 1 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class snake_case :
@staticmethod
def a_ ( *a__ : Tuple , **a__ : List[str] ) -> Tuple:
'''simple docstring'''
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
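# Note (added for clarity, not part of the original file): the MD5 digest is only used here as
# a cheap, deterministic fingerprint of the rendered depth map, not for any security purpose.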
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case ( unittest.TestCase):
__UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def a_ ( self : Dict , a__ : Tuple , a__ : Optional[int] , a__ : str ) -> Optional[Any]:
'''simple docstring'''
_A = DepthEstimationPipeline(model=a__ , image_processor=a__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a_ ( self : Tuple , a__ : Optional[int] , a__ : Dict ) -> int:
'''simple docstring'''
_A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , a__ )
import datasets
_A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , a__ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
pass
@slow
@require_torch
def a_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
_A = "Intel/dpt-large"
_A = pipeline("depth-estimation" , model=a__ )
_A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
_A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
@require_torch
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" ) | 621 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'})
    def __post_init__(self) -> None:
        '''simple docstring'''
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ) -> None:
        '''simple docstring'''
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> InputFeatures:
        '''simple docstring'''
        return self.features[i]
    def get_labels(self) -> List[str]:
        '''simple docstring'''
        return self.label_list | 621 | 1 |
"""simple docstring"""
import operator as op
def solve(post_fix) -> int:
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8) , "Action".center(12) , "Stack" , sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8) , ("push(" + x + ")").ljust(12) , ",".join(stack) , sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8) , ("pop(" + b + ")").ljust(12) , ",".join(stack) , sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8) , ("pop(" + a + ")").ljust(12) , ",".join(stack) , sep=" | ")
            stack.append(
                str(opr[x](int(a) , int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8) , ("push(" + a + x + b + ")").ljust(12) , ",".join(stack) , sep=" | " , )
    return int(stack[0])
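# Worked example (added for illustration, not part of the original script): the postfix
# expression "5 6 9 * +" evaluates as 6 * 9 = 54, then 5 + 54 = 59, so
# solve(["5", "6", "9", "*", "+"]) returns 59. Note the operand order: the second pop (a)
# is the left operand, so "8 2 /" computes 8 / 2 == 4.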
if __name__ == "__main__":
a_ = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix)) | 621 |
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    # Return True if the sink t can be reached from the source s in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
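# Note (added for illustration, not part of the original script): the capacity matrix below
# is the classic CLRS example network; the maximum flow from node 0 to node 5 is 23
# (e.g. 12 along 0-1-3-5, 4 along 0-2-4-5, and 7 along 0-2-4-3-5).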
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
a_ = logging.getLogger(__name__)
a_ = "pytorch_model.bin"
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
__UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'A csv or a json file containing the validation data.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'The name of the task to train on.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'The list of labels for the task.'})
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
__UpperCamelCase = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'})
__UpperCamelCase = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
__UpperCamelCase = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
__UpperCamelCase = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) -> None:
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset) )
        print(num_selected_rows )
        dataset = dataset.sort("probability" , reverse=True )
        dataset = dataset.select(range(num_selected_rows) )
    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" , "label" )
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f"""train_pseudo.{args.data_file_extension}""" )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
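# Note (illustrative, not part of the original script): with do_filter_by_confidence enabled
# and confidence_threshold=0.9, only pseudo-labels the model assigns more than 90% probability
# survive, e.g. datasets.Dataset.filter(lambda ex: ex["probability"] > 0.9); everything else
# is discarded before the next self-training iteration.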
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs) -> None:
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowercase ).items():
setattr(__lowercase , __lowercase , __lowercase )
for key, value in kwargs.items():
if hasattr(__lowercase , __lowercase ):
setattr(__lowercase , __lowercase , __lowercase )
# Sanity checks
_A = {}
_A = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_A = args.train_file
_A = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_A = args.eval_file
for key in data_files:
_A = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_A = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_A = f"""{args.output_dir}/self-train_iter-{{}}""".format
_A = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowercase )
os.makedirs(__lowercase , exist_ok=__lowercase )
accelerator.wait_for_everyone()
_A = None
_A = None
_A = 0
_A = False
# Show the progress bar
_A = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_A = data_dir_format(__lowercase )
assert os.path.exists(__lowercase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_A = os.path.join(__lowercase , "stage-1" )
_A = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowercase , __lowercase ):
arguments_dict.update({key: value} )
_A = os.path.join(__lowercase , "best-checkpoint" , __lowercase )
if os.path.exists(__lowercase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __lowercase , __lowercase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __lowercase )
finetune(**__lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowercase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __lowercase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_A = os.path.join(__lowercase , "best-checkpoint" )
_A = os.path.join(__lowercase , "stage-2" )
# Update arguments_dict
_A = model_path
_A = data_files["train"]
_A = current_output_dir
_A = os.path.join(__lowercase , "best-checkpoint" , __lowercase )
if os.path.exists(__lowercase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __lowercase , __lowercase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __lowercase )
finetune(**__lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowercase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __lowercase )
_A = iteration
_A = data_dir_format(iteration + 1 )
_A = AutoConfig.from_pretrained(os.path.join(__lowercase , "best-checkpoint" ) )
_A = config.id2label
_A = os.path.join(__lowercase , "eval_results_best-checkpoint.json" )
_A = os.path.join(__lowercase , "test_results_best-checkpoint.json" )
assert os.path.exists(__lowercase )
with open(__lowercase , "r" ) as f:
_A = float(json.load(__lowercase )[args.eval_metric] )
_A = os.path.join(__lowercase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(__lowercase )
# Loading the dataset from local csv or json files.
_A = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
_A = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(__lowercase , exist_ok=__lowercase )
shutil.copy(__lowercase , os.path.join(__lowercase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowercase ):
shutil.copy(__lowercase , os.path.join(__lowercase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
accelerator.wait_for_everyone()
_A = os.path.join(__lowercase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_A = eval_result
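# Early stopping bookkeeping: keep the best iteration, reset patience only when the metric
# improves by more than the threshold, otherwise count toward the patience limit.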
if best_iteration is None:
_A = new_iteration
_A = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_A = new_iteration
_A = new_eval_result
_A = 0
else:
if new_eval_result == best_eval_result:
_A = new_iteration
_A = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_A = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __lowercase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowercase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowercase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowercase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowercase , "eval_results_best-iteration.json" ) , ) | 621 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = state_dict.pop(__lowercase )
_A = val
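# Remap torchvision backbone keys ("backbone.0.body") onto the HF conv-encoder naming.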
def a__ ( __lowercase ) -> List[str]:
_A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_A = value
else:
_A = value
return new_state_dict
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A = ""
if is_panoptic:
_A = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:256, :]
_A = in_proj_bias[:256]
_A = in_proj_weight[256:512, :]
_A = in_proj_bias[256:512]
_A = in_proj_weight[-256:, :]
_A = in_proj_bias[-256:]
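# Note: the :256 / 256:512 / -256: slices assume a hidden size of 256, matching the checkpoints this script converts.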
def a__ ( ) -> int:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(k ): v for k, v in id2label.items()}
_A = id2label
_A = {v: k for k, v in id2label.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 1 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> list:
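# Greedily pack words into lines of at most max_width characters, then fully justify
# every completed line; the last line is left-justified and padded with trailing spaces.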
_A = word.split()
def justify(__lowercase , __lowercase , __lowercase ) -> str:
_A = max_width - width
_A = len(__lowercase )
if len(__lowercase ) == 1:
# if there is only one word in the line,
# just insert overall_spaces_count spaces after it
return line[0] + " " * overall_spaces_count
else:
_A = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_A = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_A = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
_A = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
_A = []
_A = []
_A = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of spaces that must separate the words once the new word is added
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase , __lowercase , __lowercase ) )
# reset new line and new width
_A , _A = [word], len(__lowercase )
_A = max_width - width - len(__lowercase )
answer.append(" ".join(__lowercase ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 621 |
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
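# Randomized quicksort: choosing a random pivot makes the O(n^2) worst case unlikely
# even on already-sorted input.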
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 621 | 1 |
"""simple docstring"""
class snake_case :
def __init__( self : Tuple , a__ : Any , a__ : Tuple ) -> str:
'''simple docstring'''
_A = name
_A = val
def __str__( self : Any ) -> int:
'''simple docstring'''
return F"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Optional[Any] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
return self.val < other.val
class snake_case :
def __init__( self : int , a__ : List[str] ) -> Dict:
'''simple docstring'''
_A = {}
_A = {}
_A = self.build_heap(a__ )
def __getitem__( self : str , a__ : Any ) -> Any:
'''simple docstring'''
return self.get_value(a__ )
def a_ ( self : List[Any] , a__ : Optional[int] ) -> int:
'''simple docstring'''
return (idx - 1) // 2
def a_ ( self : Optional[int] , a__ : Optional[int] ) -> str:
'''simple docstring'''
return idx * 2 + 1
def a_ ( self : Dict , a__ : int ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 2
def a_ ( self : str , a__ : int ) -> Optional[Any]:
'''simple docstring'''
return self.heap_dict[key]
def a_ ( self : Dict , a__ : Dict ) -> Dict:
'''simple docstring'''
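# Heapify in O(n): record each node's index and value, then sift down from the last parent to the root.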
_A = len(a__ ) - 1
_A = self.get_parent_idx(a__ )
for idx, i in enumerate(a__ ):
_A = idx
_A = i.val
for i in range(a__ , -1 , -1 ):
self.sift_down(a__ , a__ )
return array
def a_ ( self : Any , a__ : int , a__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
while True:
_A = self.get_left_child_idx(a__ ) # noqa: E741
_A = self.get_right_child_idx(a__ )
_A = idx
if l < len(a__ ) and array[l] < array[idx]:
_A = l
if r < len(a__ ) and array[r] < array[smallest]:
_A = r
if smallest != idx:
_A , _A = array[smallest], array[idx]
_A , _A = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_A = smallest
else:
break
def a_ ( self : List[str] , a__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_A = self.get_parent_idx(a__ )
while p >= 0 and self.heap[p] > self.heap[idx]:
_A , _A = self.heap[idx], self.heap[p]
_A , _A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_A = p
_A = self.get_parent_idx(a__ )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
return self.heap[0]
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
_A , _A = self.heap[-1], self.heap[0]
_A , _A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def a_ ( self : str , a__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.heap.append(a__ )
_A = len(self.heap ) - 1
_A = node.val
self.sift_up(len(self.heap ) - 1 )
def a_ ( self : Any ) -> Tuple:
'''simple docstring'''
return len(self.heap ) == 0
def a_ ( self : int , a__ : Dict , a__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_A = new_value
_A = new_value
self.sift_up(self.idx_of_element[node] )
a_ = Node("R", -1)
a_ = Node("B", 6)
a_ = Node("A", 3)
a_ = Node("X", 1)
a_ = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
a_ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
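# Compute a log10 mel spectrogram, clamp values to within 8.0 of the peak, then rescale to roughly [-1, 1].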
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.int32 )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.float32 )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
_A = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.float32 ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (480000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 1 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> None:
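# Greedy activity selection: assumes activities are sorted by finish time; repeatedly pick the
# next activity whose start time is not before the previous selection's finish time.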
_A = len(__lowercase )
print("The following activities are selected:" )
# The first activity is always selected
_A = 0
print(__lowercase , end="," )
# Consider rest of the activities
for j in range(__lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__lowercase , end="," )
_A = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [1, 3, 0, 5, 8, 5]
a_ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 621 |
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
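# Simple approach: merge both arrays and sort, O((m + n) log(m + n)); not the optimal
# O(log(m + n)) binary-search method, but easy to verify.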
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = MvpTokenizer
__UpperCamelCase = MvpTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = filter_roberta_detectors
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
_A = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_A = dict(zip(a__ , range(len(a__ ) ) ) )
_A = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a__ ) )
def a_ ( self : Optional[Any] , **a__ : List[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : int , **a__ : int ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : str , a__ : int ) -> Union[str, Any]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_A = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_A = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A = tokenizer(a__ , max_length=len(a__ ) , padding=a__ , return_tensors="pt" )
self.assertIsInstance(a__ , a__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_A = batch.input_ids.tolist()[0]
self.assertListEqual(a__ , a__ )
# Test that special tokens are reset
@require_torch
def a_ ( self : str ) -> Dict:
'''simple docstring'''
_A = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A = tokenizer(a__ , padding=a__ , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , a__ )
self.assertIn("attention_mask" , a__ )
self.assertNotIn("labels" , a__ )
self.assertNotIn("decoder_attention_mask" , a__ )
@require_torch
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_A = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A = tokenizer(text_target=a__ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=a__ , truncation=a__ , return_tensors="pt" )
self.assertIsInstance(a__ , a__ )
self.assertEqual(batch.input_ids.shape , (2, 10_24) )
@require_torch
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_A = ["A long paragraph for summarization."]
_A = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A = tokenizer(a__ , text_target=a__ , return_tensors="pt" )
_A = inputs["input_ids"]
_A = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
pass
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
_A = self.tokenizer_class.from_pretrained(a__ , **a__ )
_A = "A, <mask> AllenNLP sentence."
_A = tokenizer_r.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
_A = tokenizer_p.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_A = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_A = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
a__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
a__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) | 621 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class snake_case ( _UpperCamelCase):
def __init__( self : Any , a__ : NestedDataStructureLike[PathLike] , a__ : Optional[NamedSplit] = None , a__ : Optional[Features] = None , a__ : str = None , a__ : bool = False , a__ : bool = False , a__ : Optional[int] = None , **a__ : Optional[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(
a__ , split=a__ , features=a__ , cache_dir=a__ , keep_in_memory=a__ , streaming=a__ , num_proc=a__ , **a__ , )
_A = path_or_paths if isinstance(a__ , a__ ) else {self.split: path_or_paths}
_A = Text(
cache_dir=a__ , data_files=a__ , features=a__ , **a__ , )
def a_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
if self.streaming:
_A = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_A = None
_A = None
_A = None
_A = None
self.builder.download_and_prepare(
download_config=a__ , download_mode=a__ , verification_mode=a__ , base_path=a__ , num_proc=self.num_proc , )
_A = self.builder.as_dataset(
split=self.split , verification_mode=a__ , in_memory=self.keep_in_memory )
return dataset | 621 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def a__ ( __lowercase ) -> int:
_A = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def a__ ( __lowercase , __lowercase ) -> str:
_A = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def a__ ( __lowercase ) -> Dict:
_A = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def a__ ( ) -> Dict:
_A = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = "imagenet-1k-id2label.json"
_A = 1000
_A = "huggingface/label-files"
_A = num_labels
_A = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type="dataset" ) ) , "r" ) )
_A = {int(k ): v for k, v in id2label.items()}
_A = id2label
_A = {v: k for k, v in id2label.items()}
_A = _A = CvtConfig(num_labels=__lowercase , id2label=__lowercase , label2id=__lowercase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
_A = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
_A = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
_A = [2, 2, 20]
_A = [3, 12, 16]
_A = [192, 768, 1024]
_A = CvtForImageClassification(__lowercase )
_A = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
_A = image_size
_A = torch.load(__lowercase , map_location=torch.device("cpu" ) )
_A = OrderedDict()
_A = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
_A = list_of_state_dict + cls_token(__lowercase )
_A = list_of_state_dict + embeddings(__lowercase )
for cnt in range(config.depth[idx] ):
_A = list_of_state_dict + attention(__lowercase , __lowercase )
_A = list_of_state_dict + final()
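# Apply the collected renames: copy each tensor from the original checkpoint into the HF-named key.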
for gg in list_of_state_dict:
print(__lowercase )
for i in range(len(__lowercase ) ):
_A = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_84,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
a_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 621 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 1 |
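The dummy-input helper above branches on the device string because MPS does not support device-bound generators; a small sketch of that seeding pattern in isolation (assumes only that torch is installed):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS: fall back to the global CPU generator returned by manual_seed
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))  # identical across runs for a fixed seed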
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'beit'
def __init__( self : Tuple , a__ : List[Any]=81_92 , a__ : Tuple=7_68 , a__ : Dict=12 , a__ : List[Any]=12 , a__ : Dict=30_72 , a__ : List[Any]="gelu" , a__ : Optional[Any]=0.0 , a__ : Optional[int]=0.0 , a__ : Dict=0.0_2 , a__ : str=1E-1_2 , a__ : Union[str, Any]=2_24 , a__ : Dict=16 , a__ : Any=3 , a__ : Optional[int]=False , a__ : Union[str, Any]=False , a__ : Any=False , a__ : Union[str, Any]=False , a__ : Any=0.1 , a__ : Dict=0.1 , a__ : int=True , a__ : str=[3, 5, 7, 11] , a__ : Union[str, Any]=[1, 2, 3, 6] , a__ : Any=True , a__ : Tuple=0.4 , a__ : str=2_56 , a__ : int=1 , a__ : Tuple=False , a__ : int=2_55 , **a__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**a__ )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = use_mask_token
_A = use_absolute_position_embeddings
_A = use_relative_position_bias
_A = use_shared_relative_position_bias
_A = layer_scale_init_value
_A = drop_path_rate
_A = use_mean_pooling
# decode head attributes (semantic segmentation)
_A = out_indices
_A = pool_scales
# auxiliary head attributes (semantic segmentation)
_A = use_auxiliary_head
_A = auxiliary_loss_weight
_A = auxiliary_channels
_A = auxiliary_num_convs
_A = auxiliary_concat_input
_A = semantic_loss_ignore_index
class snake_case ( _UpperCamelCase):
__UpperCamelCase = version.parse('1.11')
@property
def a_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def a_ ( self : Dict ) -> float:
'''simple docstring'''
return 1E-4 | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 1 |
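For reference, the extended-Euclid update above reads much more clearly with distinct variable names, and can be cross-checked against Python 3.8+'s built-in modular inverse; a minimal sketch:

def mod_inverse(a: int, m: int) -> int:
    # iterative extended Euclid: the u-triple tracks coefficients for a, the v-triple for m
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m

assert mod_inverse(7, 26) == pow(7, -1, 26) == 15  # 7 * 15 = 105 = 4 * 26 + 1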
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
a_ = logging.get_logger(__name__)
class snake_case :
def __init__( self : Union[str, Any] , a__ : Any , a__ : Union[str, Any] ) -> int:
'''simple docstring'''
_A = question_encoder
_A = generator
_A = self.question_encoder
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> Dict:
'''simple docstring'''
if os.path.isfile(a__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(a__ , exist_ok=a__ )
_A = os.path.join(a__ , "question_encoder_tokenizer" )
_A = os.path.join(a__ , "generator_tokenizer" )
self.question_encoder.save_pretrained(a__ )
self.generator.save_pretrained(a__ )
@classmethod
def a_ ( cls : int , a__ : List[Any] , **a__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop("config" , a__ )
if config is None:
_A = RagConfig.from_pretrained(a__ )
_A = AutoTokenizer.from_pretrained(
a__ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
_A = AutoTokenizer.from_pretrained(
a__ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=a__ , generator=a__ )
def __call__( self : Optional[int] , *a__ : int , **a__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*a__ , **a__ )
def a_ ( self : Optional[int] , *a__ : Any , **a__ : Tuple ) -> str:
'''simple docstring'''
return self.generator.batch_decode(*a__ , **a__ )
def a_ ( self : Tuple , *a__ : Union[str, Any] , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
return self.generator.decode(*a__ , **a__ )
def a_ ( self : int ) -> str:
'''simple docstring'''
_A = self.question_encoder
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = self.generator
def a_ ( self : Dict , a__ : List[str] , a__ : Optional[List[str]] = None , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : str = "longest" , a__ : str = None , a__ : bool = True , **a__ : Optional[int] , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , a__ , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
a__ , add_special_tokens=a__ , return_tensors=a__ , max_length=a__ , padding=a__ , truncation=a__ , **a__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=a__ , add_special_tokens=a__ , return_tensors=a__ , padding=a__ , max_length=a__ , truncation=a__ , **a__ , )
_A = labels["input_ids"]
return model_inputs | 621 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 1 |
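A back-of-envelope check of the batch sizes asserted in the tests above (this sketches only the arithmetic, not Accelerate's actual sharding code):

# 7 samples with batch_size=2 give batches [2, 2, 2, 1]; dealt round-robin
# over 2 processes, rank 0 sees [2, 2] and rank 1 sees [2, 1].
# even_batches=True pads rank 1's last batch back up to size 2.
samples, batch_size = 7, 2
batches = [min(batch_size, samples - i) for i in range(0, samples, batch_size)]
assert batches == [2, 2, 2, 1]
assert batches[0::2] == [2, 2]  # rank 0
assert batches[1::2] == [2, 1]  # rank 1, before padding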
"""simple docstring"""
from __future__ import annotations
import requests
a_ = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def a__ ( __lowercase , __lowercase = 1 , __lowercase = "new" , __lowercase = None ) -> dict:
_A = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ):
_A = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__lowercase )
_A = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
_A = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )}
_A = {}
for id_ in range(__lowercase ):
_A = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"])) | 621 |
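Since the function above surfaces HTTP 429 as a bare requests.HTTPError, callers may want to retry with exponential backoff; a minimal illustrative wrapper (not part of the module above):

import time

import requests

def with_backoff(fn, retries: int = 3, base_delay: float = 2.0):
    # retry a callable that raises requests.HTTPError on rate limiting
    for attempt in range(retries):
        try:
            return fn()
        except requests.HTTPError:
            time.sleep(base_delay * 2**attempt)  # 2s, 4s, 8s
    raise RuntimeError("still rate limited after retries")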
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
        if isinstance(sources , int ):
            _A = [sources]
        if isinstance(sinks , int ):
            _A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 1 |
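As an independent sanity check on the push-relabel result, a compact Edmonds-Karp (BFS augmenting paths) over the same 4-node capacity matrix should report a maximum flow of 6, limited by the middle edge of capacity 6 (a standalone sketch, not part of the module above):

from collections import deque

def edmonds_karp(cap, s, t):
    n = len(cap)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[s] = s
        queue = deque([s])
        while queue and parent[t] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and cap[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[t] == -1:
            break  # no augmenting path left
        # find the bottleneck along the path, then push flow along it
        v, bottleneck = t, float("inf")
        while v != s:
            bottleneck = min(bottleneck, cap[parent[v]][v] - flow[parent[v]][v])
            v = parent[v]
        v = t
        while v != s:
            flow[parent[v]][v] += bottleneck
            flow[v][parent[v]] -= bottleneck
            v = parent[v]
        total += bottleneck
    return total

assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6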
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=10_00 , norm_type="ada_norm_zero" , norm_elementwise_affine=a__ , )
_A = AutoencoderKL()
_A = DDIMScheduler()
_A = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def a_ ( self : Optional[Any] , a__ : Any , a__ : Dict=0 ) -> Optional[Any]:
'''simple docstring'''
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_A = "cpu"
_A = self.get_dummy_components()
_A = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_A = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1E-3 )
def a_ ( self : str ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a_ ( self : str ) -> Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Any ) -> Any:
'''simple docstring'''
_A = torch.manual_seed(0 )
_A = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
_A = ["vase", "umbrella", "white shark", "white wolf"]
_A = pipe.get_label_ids(a__ )
_A = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(a__ , a__ ):
_A = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def a_ ( self : str ) -> str:
'''simple docstring'''
_A = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
_A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
_A = ["vase", "umbrella"]
_A = pipe.get_label_ids(a__ )
_A = torch.manual_seed(0 )
_A = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(a__ , a__ ):
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
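The import table above defers heavy, optional backends until first attribute access; a stripped-down sketch of the same idea using a module-level __getattr__ (PEP 562), independent of the _LazyModule helper (the mapping below is illustrative):

import importlib

_LAZY = {
    "RoFormerTokenizer": ".tokenization_roformer",  # attribute -> submodule
}

def __getattr__(name):
    # resolved only on first access, so importing the package stays cheap
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")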
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( __lowercase ) -> Optional[Any]:
def wrapper(*__lowercase , **__lowercase ):
_A = timeit.default_timer()
_A = func(*__lowercase , **__lowercase )
_A = timeit.default_timer() - starttime
return delta
_A = func.__name__
return wrapper
def a__ ( __lowercase , __lowercase=100 , __lowercase=None ) -> Any:
_A = []
_A = seq_shapes or {}
for i in range(__lowercase ):
_A = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__lowercase , _ArrayXD ):
_A = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__lowercase , datasets.Value ):
if v.dtype == "string":
_A = "The small grey turtle was surprisingly fast when challenged."
else:
_A = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__lowercase , datasets.Sequence ):
while isinstance(__lowercase , datasets.Sequence ):
_A = v.feature
_A = seq_shapes[k]
_A = np.random.rand(*__lowercase ).astype(v.dtype )
_A = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( __lowercase , __lowercase , __lowercase=100 , __lowercase=None ) -> Union[str, Any]:
_A = generate_examples(__lowercase , num_examples=__lowercase , seq_shapes=__lowercase )
with ArrowWriter(features=__lowercase , path=__lowercase ) as writer:
for key, record in dummy_data:
_A = features.encode_example(__lowercase )
writer.write(__lowercase )
_A , _A = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
_A = datasets.Dataset.from_file(filename=__lowercase , info=datasets.DatasetInfo(features=__lowercase ) )
return dataset | 621 |
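One subtlety of the timing decorator above is that the wrapper returns the elapsed seconds and discards the wrapped function's result; a standalone, runnable version of the same pattern:

import timeit

def get_duration(func):
    # note: returns elapsed seconds, not func's return value
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    wrapper.__name__ = func.__name__
    return wrapper

@get_duration
def busy_loop(n):
    sum(range(n))

print(f"busy_loop took {busy_loop(1_000_000):.4f}s")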
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = ShapEPipeline
__UpperCamelCase = ['prompt']
__UpperCamelCase = ['prompt']
__UpperCamelCase = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__UpperCamelCase = False
@property
def a_ ( self : int ) -> int:
'''simple docstring'''
return 32
@property
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return 32
@property
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def a_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return 8
@property
def a_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a__ )
@property
def a_ ( self : Dict ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
_A = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_A = PriorTransformer(**a__ )
return model
@property
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_A = ShapERenderer(**a__ )
return model
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
_A = self.dummy_prior
_A = self.dummy_text_encoder
_A = self.dummy_tokenizer
_A = self.dummy_renderer
_A = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_A = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def a_ ( self : List[str] , a__ : str , a__ : Tuple=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = "cpu"
_A = self.get_dummy_components()
_A = self.pipeline_class(**a__ )
_A = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = pipe(**self.get_dummy_inputs(a__ ) )
_A = output.images[0]
_A = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_A = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a_ ( self : str ) -> Dict:
'''simple docstring'''
_A = torch_device == "cpu"
_A = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
_A = self.get_dummy_components()
_A = self.pipeline_class(**a__ )
_A = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = 1
_A = 2
_A = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_A = batch_size * [inputs[key]]
_A = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Any ) -> int:
'''simple docstring'''
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
_A = ShapEPipeline.from_pretrained("openai/shap-e" )
_A = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = pipe(
"a shark" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ ) | 621 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase , __lowercase , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 |
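The three branches above all follow from the mass-action law n * p = n_i**2; a worked numeric check (the values are illustrative, not datasheet figures):

n_i = 2.0        # intrinsic carrier concentration
n = 8.0          # electron concentration
p = n_i**2 / n   # hole concentration implied by the law
assert p == 0.5
assert abs(n * p - n_i**2) < 1e-12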
"""simple docstring"""
import numpy as np
def a__ ( __lowercase , __lowercase ) -> np.ndarray:
return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 1 |
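The expression above is ELU(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise; a quick numeric spot check:

import numpy as np

vector = np.array([2.0, 0.0, -1.0])
alpha = 1.0
out = np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
assert out[0] == 2.0                               # positive input passes through
assert out[1] == 0.0                               # ELU(0) = e^0 - 1 = 0
assert abs(out[2] - (np.exp(-1.0) - 1.0)) < 1e-12  # ELU(-1) ~= -0.632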
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple[int, int]:
def constraint_to_multiple_of(__lowercase , __lowercase , __lowercase=0 , __lowercase=None ):
_A = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_A = math.floor(val / multiple ) * multiple
if x < min_val:
_A = math.ceil(val / multiple ) * multiple
return x
_A = (output_size, output_size) if isinstance(__lowercase , __lowercase ) else output_size
_A , _A = get_image_size(__lowercase )
_A , _A = output_size
# determine new height and width
_A = output_height / input_height
_A = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_A = scale_width
else:
# fit height
_A = scale_height
_A = constraint_to_multiple_of(scale_height * input_height , multiple=__lowercase )
_A = constraint_to_multiple_of(scale_width * input_width , multiple=__lowercase )
return (new_height, new_width)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['pixel_values']
def __init__( self : Any , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = False , a__ : int = 1 , a__ : bool = True , a__ : Union[int, float] = 1 / 2_55 , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**a__ )
_A = size if size is not None else {"height": 3_84, "width": 3_84}
_A = get_size_dict(a__ )
_A = do_resize
_A = size
_A = keep_aspect_ratio
_A = ensure_multiple_of
_A = resample
_A = do_rescale
_A = rescale_factor
_A = do_normalize
_A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self : int , a__ : np.ndarray , a__ : Dict[str, int] , a__ : bool = False , a__ : int = 1 , a__ : PILImageResampling = PILImageResampling.BICUBIC , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
_A = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_A = get_resize_output_image_size(
a__ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=a__ , multiple=a__ , )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def a_ ( self : int , a__ : np.ndarray , a__ : Union[int, float] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : int , ) -> Any:
'''simple docstring'''
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def a_ ( self : List[Any] , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def a_ ( self : Dict , a__ : ImageInput , a__ : bool = None , a__ : int = None , a__ : bool = None , a__ : int = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
_A = do_resize if do_resize is not None else self.do_resize
_A = size if size is not None else self.size
_A = get_size_dict(a__ )
_A = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_A = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_A = resample if resample is not None else self.resample
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_A = [to_numpy_array(a__ ) for image in images]
if do_resize:
_A = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_rescale:
_A = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_A = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_A = [to_channel_dimension_format(a__ , a__ ) for image in images]
_A = {"pixel_values": images}
return BatchFeature(data=a__ , tensor_type=a__ )
def a_ ( self : int , a__ : str , a__ : List[Tuple] = None ) -> str:
'''simple docstring'''
_A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a__ ) != len(a__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(a__ ):
_A = target_sizes.numpy()
_A = []
for idx in range(len(a__ ) ):
_A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=a__ )
_A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a__ )
else:
_A = logits.argmax(dim=1 )
_A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 621 |
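The resize path above snaps target sizes to a multiple of ensure_multiple_of while honoring bounds; a standalone sketch of that rounding rule with a quick check:

import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # round to the nearest multiple, then floor/ceil to respect the bounds
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

assert constraint_to_multiple_of(383, 32) == 384               # nearest multiple of 32
assert constraint_to_multiple_of(383, 32, max_val=380) == 352  # pushed down under the cap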
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 1 |
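# A minimal, self-contained sketch of the eos-appending and special-tokens-mask
# logic in the tokenizer above, with a made-up eos id instead of a real
# SentencePiece vocabulary; illustrative only, not the model's actual ids.
EOS_ID = 1

def add_eos_if_not_present(token_ids):
    # Append eos only when the sequence does not already end with it.
    if token_ids and token_ids[-1] == EOS_ID:
        return token_ids
    return token_ids + [EOS_ID]

def special_tokens_mask(ids_a, ids_b=None):
    # 0 marks a regular token, 1 marks the appended eos.
    if ids_b is None:
        return [0] * len(ids_a) + [1]
    return [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]

assert add_eos_if_not_present([5, 6]) == [5, 6, EOS_ID]
assert special_tokens_mask([5, 6], [7]) == [0, 0, 1, 0, 1]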
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
a_ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
a_ = concatenate_datasets
a_ = DownloadConfig
a_ = DownloadManager
a_ = DownloadMode
a_ = DownloadConfig
a_ = DownloadMode
a_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 621 |
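# A small sketch of the same import-time version gating, assuming only that the
# `packaging` package is installed; the "3.7"/"8.0.0" floors mirror the checks above.
import platform
from packaging import version

def check_runtime(pyarrow_version, min_python="3.7", min_pyarrow="8.0.0"):
    # Compare parsed versions, never raw strings.
    if version.parse(platform.python_version()) < version.parse(min_python):
        raise ImportWarning(f"Python>={min_python} is required.")
    if version.parse(pyarrow_version).major < version.parse(min_pyarrow).major:
        raise ImportWarning(f"pyarrow>={min_pyarrow} is required, found {pyarrow_version}.")

check_runtime("12.0.1")  # passes: major 12 >= 8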
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow this is one packed array, so it is split per expert
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # split the packed q/k/v kernel the same way Mesh-TensorFlow lays it out
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 1 |
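# A numpy-only sketch of the "/qkv/kernel" split performed above: a packed
# (hidden, 3, heads, head_dim) kernel is sliced into q/k/v, then each piece is
# flattened and transposed into a (heads*head_dim, hidden) projection weight.
# All shapes here are assumed toy values, not the real checkpoint's.
import numpy as np

hidden, heads, head_dim = 8, 2, 4
qkv = np.random.rand(hidden, 3, heads, head_dim).astype(np.float32)

q, k, v = qkv[:, 0], qkv[:, 1], qkv[:, 2]  # each (hidden, heads, head_dim)
q_proj = q.reshape(hidden, heads * head_dim).transpose(1, 0).copy()
assert q_proj.shape == (heads * head_dim, hidden)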
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'time_series_transformer'
__UpperCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Tuple , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : str = "student_t" , a__ : str = "nll" , a__ : int = 1 , a__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , a__ : Optional[Union[str, bool]] = "mean" , a__ : int = 0 , a__ : int = 0 , a__ : int = 0 , a__ : int = 0 , a__ : Optional[List[int]] = None , a__ : Optional[List[int]] = None , a__ : int = 32 , a__ : int = 32 , a__ : int = 2 , a__ : int = 2 , a__ : int = 2 , a__ : int = 2 , a__ : bool = True , a__ : str = "gelu" , a__ : int = 64 , a__ : float = 0.1 , a__ : float = 0.1 , a__ : float = 0.1 , a__ : float = 0.1 , a__ : float = 0.1 , a__ : int = 1_00 , a__ : float = 0.0_2 , a__ : Tuple=True , **a__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
_A = prediction_length
_A = context_length or prediction_length
_A = distribution_output
_A = loss
_A = input_size
_A = num_time_features
_A = lags_sequence
_A = scaling
_A = num_dynamic_real_features
_A = num_static_real_features
_A = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(a__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_A = cardinality
else:
_A = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(a__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_A = embedding_dimension
else:
_A = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_A = num_parallel_samples
# Transformer architecture configuration
_A = input_size * len(a__ ) + self._number_of_features
_A = d_model
_A = encoder_attention_heads
_A = decoder_attention_heads
_A = encoder_ffn_dim
_A = decoder_ffn_dim
_A = encoder_layers
_A = decoder_layers
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = activation_function
_A = init_std
_A = use_cache
super().__init__(is_encoder_decoder=a__ , **a__ )
@property
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 621 |
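# A pure-Python sketch of the cardinality -> embedding-dimension heuristic and
# the feature-count sum used by the config above; the inputs are made-up examples.
cardinality = [10, 100, 3]
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]
assert embedding_dimension == [5, 50, 2]

input_size, num_time_features = 1, 4
num_dynamic_real, num_static_real = 2, 1
number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real
    + num_time_features
    + num_static_real
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
assert number_of_features == 66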
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 1 |
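# A sketch of the layer-subsetting pattern above on a plain dict standing in
# for a state dict: selected teacher layers are copied into consecutive student
# slots. The key template is illustrative, not the real RoBERTa/GPT-2 names.
teacher_sd = {f"encoder.layer.{i}.weight": [i] for i in range(12)}
selected = [0, 2, 4, 7, 9, 11]

student_sd = {}
for std_idx, teacher_idx in enumerate(selected):
    student_sd[f"encoder.layer.{std_idx}.weight"] = teacher_sd[f"encoder.layer.{teacher_idx}.weight"]

assert student_sd["encoder.layer.5.weight"] == [11]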
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 |
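# A framework-free sketch of the seeded-determinism pattern these tests rely on:
# identical seeds must reproduce identical outputs, so a max-abs-diff bound holds.
import numpy as np

def run(seed):
    return np.random.default_rng(seed).random((64, 64, 3))

a, b = run(0), run(0)
assert np.abs(a - b).max() < 1e-9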
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
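# A minimal sketch of lazy importing in the spirit of _LazyModule, using a
# module-level __getattr__ (PEP 562). When placed at the bottom of a module,
# `from that_module import dumps` resolves the symbol only on first access.
import importlib

_lazy_structure = {"json": ["dumps"]}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)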
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
def count_of_possible_combinations(__lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__lowercase )
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
def count_of_possible_combinations_with_dp_array(
__lowercase , __lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_A = sum(
count_of_possible_combinations_with_dp_array(target - item , __lowercase )
for item in array )
_A = answer
return answer
_A = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__lowercase , __lowercase )
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
_A = [0] * (target + 1)
_A = 1
for i in range(1 , target + 1 ):
for j in range(__lowercase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = 3
a_ = 5
a_ = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 621 |
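# An alternative memoized sketch of the same count using functools.lru_cache in
# place of the explicit dp_array; semantics match count_of_possible_combinations.
from functools import lru_cache

def combination_count(array, target):
    @lru_cache(maxsize=None)
    def go(remaining):
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(go(remaining - item) for item in array)

    return go(target)

assert combination_count((1, 2, 5), 5) == 9  # matches the example above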
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of the batch size but still yields a multiple of
# num_processes batches.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of the batch size and does not yield a multiple of
# num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of the batch size but still yields a multiple of
# num_processes batches.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of the batch size and does not yield a multiple of
# num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length: a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 1 |
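# A plain-Python sketch of the even round-robin sharding these tests verify:
# process i takes every num_processes-th batch, wrapping around from the start
# when the batch count is uneven. Not the real BatchSamplerShard implementation.
def shard_batches(batches, num_processes, process_index):
    padded = list(batches)
    i = 0
    while len(padded) % num_processes != 0:  # even_batches=True behaviour
        padded.append(batches[i % len(batches)])
        i += 1
    return padded[process_index::num_processes]

batches = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
assert shard_batches(batches, 2, 0) == [[0, 1, 2], [6, 7, 8]]
assert shard_batches(batches, 2, 1) == [[3, 4, 5], [0, 1, 2]]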
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
a_ = input("Enter image url: ").strip()
print(f'''Downloading image from {url} ...''')
a_ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
a_ = soup.find("meta", {"property": "og:image"})["content"]
a_ = requests.get(image_url).content
a_ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''') | 621 |
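# The timestamp above embeds ":" which is not a legal filename character on
# Windows; a portable variant (purely an illustrative tweak) uses dashes.
from datetime import datetime

safe_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".jpg"
assert ":" not in safe_name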
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 1 |
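# A numpy-only sketch of the slice assertion pattern used throughout the tests:
# take a small corner of the output and bound its max deviation from a reference.
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)
image_slice = image[0, 253:256, 253:256, -1]
expected = np.zeros(9, dtype=np.float32)
assert np.abs(image_slice.flatten() - expected).max() < 1e-1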
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def a__ ( __lowercase = "AAPL" ) -> str:
_A = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_A = BeautifulSoup(requests.get(__lowercase ).text , "html.parser" )
_A = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''') | 621 |
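# An offline sketch of the same find/class_ lookup on a static HTML snippet so
# it runs without network access; the class name here is a stand-in, not Yahoo's.
from bs4 import BeautifulSoup

html = '<div class="price"><span>123.45</span></div>'
soup = BeautifulSoup(html, "html.parser")
assert soup.find("div", class_="price").find("span").text == "123.45"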
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 1 |
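# A sketch of the lock-guarded compute-or-load caching pattern the dataset
# class uses, with pickle standing in for torch.save/torch.load; `filelock`
# is the same third-party package imported above.
import os
import pickle
from filelock import FileLock

def cached(path, compute):
    # Only the first process builds the cache; the others block on the lock
    # and then read the finished file.
    with FileLock(path + ".lock"):
        if os.path.exists(path):
            with open(path, "rb") as f:
                return pickle.load(f)
        result = compute()
        with open(path, "wb") as f:
            pickle.dump(result, f)
        return result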
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case ( unittest.TestCase):
@slow
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
_A = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
_A = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
_A = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_A = model(a__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , a__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a__ , atol=1E-3 ) )
@slow
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
_A = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
_A = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
_A = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_A = model(a__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , a__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a__ , atol=1E-3 ) ) | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
# Return True if the sink t is reachable from the source s in the residual graph.
_A = [False] * len(__lowercase )
_A = []
queue.append(__lowercase )
_A = True
while queue:
_A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_A = True
_A = u
return visited[t]
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
# This array is filled by BFS to store the augmenting path
_A = [-1] * (len(__lowercase ))
_A = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_A = float("Inf" )
_A = sink
while s != source:
# Find the minimum residual capacity along the selected path
_A = min(__lowercase , graph[parent[s]][s] )
_A = parent[s]
max_flow += path_flow
_A = sink
while v != source:
_A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_A = parent[v]
return max_flow
a_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
a_ , a_ = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 1 |
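# Note that the max-flow routine above mutates its capacity matrix in place
# (it becomes the residual graph), so work on a deep copy whenever the original
# capacities must survive. A tiny self-contained 4-node example (max flow 5):
import copy

caps = [
    [0, 3, 2, 0],
    [0, 0, 0, 3],
    [0, 0, 0, 2],
    [0, 0, 0, 0],
]
residual = copy.deepcopy(caps)
# ford_fulkerson(residual, 0, 3) would return 5 while leaving `caps` intact.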
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class snake_case ( _UpperCamelCase):
def __init__( self : int , a__ : Tuple , a__ : Tuple ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=a__ , scheduler=a__ )
def __call__( self : List[str] ) -> str:
'''simple docstring'''
_A = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_A = 1
_A = self.unet(a__ , a__ ).sample
_A = self.scheduler.step(a__ , a__ , a__ ).prev_sample
_A = scheduler_output - scheduler_output + torch.ones_like(a__ )
return result | 621 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
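# Note: the per-layer ca_qpos_proj renames are deliberately left commented out
# above: in the original Conditional DETR, ca_qpos_proj is set to None for
# every decoder layer after the first, so only the explicit layer-0 entries at
# the end of the list need converting.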
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = state_dict.pop(__lowercase )
_A = val
def a__ ( __lowercase ) -> List[str]:
_A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_A = value
else:
_A = value
return new_state_dict
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A = ""
if is_panoptic:
_A = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:256, :]
_A = in_proj_bias[:256]
_A = in_proj_weight[256:512, :]
_A = in_proj_bias[256:512]
_A = in_proj_weight[-256:, :]
_A = in_proj_bias[-256:]
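# A minimal illustration (assuming hidden_size=256): in_proj_weight has shape
# (3*256, 256) = (768, 256) with the query, key and value projections stacked
# row-wise, so the three slices above are equivalent to
# q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]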
def a__ ( ) -> int:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(k ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
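# e.g. idalabel ends up as {1: "person", 2: "bicycle", ...} for COCO detection
# (illustrative entries; the actual mapping comes from the JSON loaded above).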
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case ( unittest.TestCase):
__UpperCamelCase = JukeboxTokenizer
__UpperCamelCase = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
import torch
_A = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
_A = tokenizer(**self.metas )["input_ids"]
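# JukeboxTokenizer returns one tensor of metadata + lyric tokens per prior
# level (three for this checkpoint), hence the three expected tensors below.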
# fmt: off
_A = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
import torch
_A = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
_A = tokenizer(**self.metas )["input_ids"]
# fmt: off
_A = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) | 621 |
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
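# Illustrative trace: partition([3, 1, 4, 1, 5], 0, 5) takes a[0] = 3 as the
# pivot, moves the smaller elements left, leaves the list as [1, 1, 3, 4, 5]
# and returns 2, the pivot's final index.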
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 621 | 1 |
"""simple docstring"""
import copy
import re
class snake_case :
__UpperCamelCase = 'hp'
__UpperCamelCase = {}
__UpperCamelCase = None
@classmethod
def a_ ( cls : Any , a__ : str , a__ : Any ) -> List[str]:
'''simple docstring'''
_A = prefix
_A = defaults
cls.build_naming_info()
@staticmethod
def a_ ( a__ : Optional[Any] , a__ : Dict ) -> Tuple:
'''simple docstring'''
if len(a__ ) == 0:
return ""
_A = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(a__ ) + 1 ):
_A = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_A = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(a__ : Optional[Any] ):
_A = ""
while integer != 0:
_A = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
_A = 0
while True:
_A = word + "#" + int_to_alphabetic(a__ )
if sword in info["reverse_short_word"]:
continue
else:
_A = sword
break
_A = short_word
_A = word
return short_word
@staticmethod
def a_ ( a__ : Dict , a__ : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = param_name.split("_" )
_A = [TrialShortNamer.shortname_for_word(a__ , a__ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_A = ["", "_"]
for separator in separators:
_A = separator.join(a__ )
if shortname not in info["reverse_short_param"]:
_A = shortname
_A = param_name
return shortname
return param_name
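# e.g. "learning_rate" may shorten to "lr" when the one-letter prefixes "l"
# and "r" are still free; if "lr" already maps to another parameter, the
# separated form "l_r" is tried before falling back to the full name
# (illustrative values).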
@staticmethod
def a_ ( a__ : Optional[Any] , a__ : List[Any] ) -> Tuple:
'''simple docstring'''
_A = TrialShortNamer.shortname_for_key(a__ , a__ )
_A = short_name
_A = param_name
@classmethod
def a_ ( cls : str ) -> str:
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_A = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
_A = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(a__ , a__ )
_A = info
@classmethod
def a_ ( cls : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_A = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_A = cls.NAMING_INFO["short_param"][k]
if isinstance(a__ , a__ ):
_A = 1 if v else 0
_A = "" if isinstance(a__ , (int, float) ) else "-"
_A = F"""{key}{sep}{v}"""
name.append(a__ )
return "_".join(a__ )
@classmethod
def a_ ( cls : Dict , a__ : int ) -> Tuple:
'''simple docstring'''
_A = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_A = []
else:
_A = repr.split("_" )
_A = {}
for value in values:
if "-" in value:
_A , _A = value.split("-" )
else:
_A = re.sub("[0-9.]" , "" , a__ )
_A = float(re.sub("[^0-9.]" , "" , a__ ) )
_A = cls.NAMING_INFO["reverse_short_param"][p_k]
_A = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_A = cls.DEFAULTS[k]
return parameters | 621 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
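# Note on the scaling above: clamping to (max - 8.0) keeps an 80 dB dynamic
# range (8 units in log10 of a power spectrogram), and (x + 4.0) / 4.0 then
# maps the clamped values into roughly [-1, 1].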
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
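# When an attention mask is given, the mean and variance are computed over
# each vector's un-padded prefix only, so padding never skews the statistics;
# the padded tail is then overwritten with padding_value.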
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 |
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
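# e.g. median_of_two_arrays([1, 3], [2, 4]) -> 2.5, while
# median_of_two_arrays([1, 3], [2]) -> 2 (an odd combined length returns the
# middle element directly).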
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'distilbert'
__UpperCamelCase = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : Any , a__ : Union[str, Any]=3_05_22 , a__ : Optional[Any]=5_12 , a__ : Union[str, Any]=False , a__ : Optional[Any]=6 , a__ : Any=12 , a__ : Union[str, Any]=7_68 , a__ : List[str]=4 * 7_68 , a__ : Any=0.1 , a__ : Dict=0.1 , a__ : List[Any]="gelu" , a__ : Dict=0.0_2 , a__ : int=0.1 , a__ : Optional[Any]=0.2 , a__ : Union[str, Any]=0 , **a__ : int , ) -> List[Any]:
'''simple docstring'''
_A = vocab_size
_A = max_position_embeddings
_A = sinusoidal_pos_embds
_A = n_layers
_A = n_heads
_A = dim
_A = hidden_dim
_A = dropout
_A = attention_dropout
_A = activation
_A = initializer_range
_A = qa_dropout
_A = seq_classif_dropout
super().__init__(**a__ , pad_token_id=a__ )
class snake_case ( _UpperCamelCase):
@property
def a_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 621 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def a__ ( __lowercase ) -> Tuple:
# A local function to see if a dot lands in the circle.
def is_in_circle(__lowercase , __lowercase ) -> bool:
_A = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__lowercase ) )
# The ratio of the area for circle to square is pi/4.
_A = proportion * 4
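# Reasoning: the dots are uniform on the square [-1, 1] x [-1, 1] (area 4) and
# the unit circle inside it has area pi, so P(hit) = pi / 4 and the proportion
# times 4 estimates pi.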
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def a__ ( __lowercase , __lowercase , __lowercase = 0.0 , __lowercase = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(__lowercase , __lowercase ) ) for _ in range(__lowercase ) ) * (max_value - min_value)
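# e.g. with function_to_integrate = lambda x: x over [0.0, 1.0] the estimate
# converges to 0.5: it is the mean sampled height times the interval width.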
def a__ ( __lowercase , __lowercase = 0.0 , __lowercase = 1.0 ) -> None:
def identity_function(__lowercase ) -> float:
return x
_A = area_under_curve_estimator(
__lowercase , __lowercase , __lowercase , __lowercase )
_A = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print("******************" )
def a__ ( __lowercase ) -> None:
def function_to_integrate(__lowercase ) -> float:
return sqrt(4.0 - x * x )
_A = area_under_curve_estimator(
__lowercase , __lowercase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case ( unittest.TestCase):
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_A = "A painting of a squirrel eating a burger"
_A = jax.device_count()
_A = num_samples * [prompt]
_A = sd_pipe.prepare_inputs(a__ )
_A = replicate(a__ )
_A = shard(a__ )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(a__ , jax.device_count() )
_A = sd_pipe(a__ , a__ , a__ , num_inference_steps=25 , jit=a__ )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_53:2_56, 2_53:2_56, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_A = "stabilityai/stable-diffusion-2"
_A , _A = FlaxDPMSolverMultistepScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A , _A = FlaxStableDiffusionPipeline.from_pretrained(
a__ , scheduler=a__ , revision="bf16" , dtype=jnp.bfloataa , )
_A = scheduler_params
_A = "A painting of a squirrel eating a burger"
_A = jax.device_count()
_A = num_samples * [prompt]
_A = sd_pipe.prepare_inputs(a__ )
_A = replicate(a__ )
_A = shard(a__ )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(a__ , jax.device_count() )
_A = sd_pipe(a__ , a__ , a__ , num_inference_steps=25 , jit=a__ )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_53:2_56, 2_53:2_56, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 621 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
# Load configuration defined in the metadata file
with open(__lowercase ) as metadata_file:
_A = json.load(__lowercase )
_A = LukeConfig(use_entity_aware_attention=__lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
_A = torch.load(__lowercase , map_location="cpu" )
# Load the entity vocab file
_A = load_entity_vocab(__lowercase )
_A = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_A = AddedToken("<ent>" , lstrip=__lowercase , rstrip=__lowercase )
_A = AddedToken("<ent2>" , lstrip=__lowercase , rstrip=__lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__lowercase , __lowercase )
_A = LukeTokenizer.from_pretrained(__lowercase )
# Initialize the embeddings of the special tokens
_A = state_dict["embeddings.word_embeddings.weight"]
_A = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
_A = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
_A = torch.cat([word_emb, ent_emb, enta_emb] )
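# Illustrative shapes: word_emb is (vocab_size, hidden_size); concatenating
# the two copied rows (taken from "@" and "#") gives (vocab_size + 2,
# hidden_size), matching the two special tokens <ent> and <ent2> added above.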
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_A = f"""encoder.layer.{layer_index}.attention.self."""
_A = state_dict[prefix + matrix_name]
_A = state_dict[prefix + matrix_name]
_A = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_A = state_dict["entity_embeddings.entity_embeddings.weight"]
_A = entity_emb[entity_vocab["[MASK]"]]
_A = LukeModel(config=__lowercase ).eval()
_A , _A = model.load_state_dict(__lowercase , strict=__lowercase )
if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
_A = LukeTokenizer.from_pretrained(__lowercase , task="entity_classification" )
_A = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
_A = (39, 42)
_A = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors="pt" )
_A = model(**__lowercase )
# Verify word hidden states
if model_size == "large":
_A = torch.Size((1, 42, 1024) )
_A = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
_A = torch.Size((1, 42, 768) )
_A = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_A = torch.Size((1, 1, 1024) )
_A = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
_A = torch.Size((1, 1, 768) )
_A = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__lowercase ) )
model.save_pretrained(__lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A = {}
with open(__lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(__lowercase ):
_A , _A = line.rstrip().split("\t" )
_A = index
return entity_vocab
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
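# e.g. gcd(48, 18) returns 6: the loop maps (a, b) -> (b % a, a), i.e.
# (48, 18) -> (18, 48) -> (12, 18) -> (6, 12) -> (0, 6).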
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
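# e.g. for a = 3 and m = 11 the loop terminates with an inverse of 4, since
# (3 * 4) % 11 == 1.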
return ua % m | 621 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ )
return image
@property
def a_ ( self : Any ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def a_ ( self : str ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(a__ )
@property
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
def extract(*a__ : List[Any] , **a__ : str ):
class snake_case :
def __init__( self : int ) -> List[Any]:
'''simple docstring'''
_A = torch.ones([0] )
def a_ ( self : int , a__ : Dict ) -> Dict:
'''simple docstring'''
self.pixel_values.to(a__ )
return self
return Out()
return extract
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet
_A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=a__ , set_alpha_to_one=a__ , )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "A painting of a squirrel eating a burger"
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
_A = output.images
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a__ , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=a__ )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "A painting of a squirrel eating a burger"
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
_A = output.images
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a__ , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_A = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a__ )
assert isinstance(a__ , a__ )
assert isinstance(pipe.scheduler , a__ )
assert pipe.safety_checker is None
_A = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = StableDiffusionPipeline.from_pretrained(a__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_A = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=a__ )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
_A = unet.half()
_A = vae.half()
_A = bert.half()
# make sure here that pndm scheduler skips prk
_A = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "A painting of a squirrel eating a burger"
_A = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Dict ) -> str:
'''simple docstring'''
_A = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a__ )
_A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
_A = 40_03_66_03_46
_A = 7
# without safety guidance (sld_guidance_scale = 0)
_A = torch.manual_seed(a__ )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_A = output.images
_A = image[0, -3:, -3:, -1]
_A = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_A = torch.manual_seed(a__ )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_A = output.images
_A = image[0, -3:, -3:, -1]
_A = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a__ )
_A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "padme amidala taking a bath artwork, safe for work, no nudity"
_A = 27_34_97_17_55
_A = 7
_A = torch.manual_seed(a__ )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_A = output.images
_A = image[0, -3:, -3:, -1]
_A = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_A = torch.manual_seed(a__ )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_A = output.images
_A = image[0, -3:, -3:, -1]
_A = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
_A = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
_A = 10_44_35_52_34
_A = 12
_A = torch.manual_seed(a__ )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_A = output.images
_A = image[0, -3:, -3:, -1]
_A = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_A = torch.manual_seed(a__ )
_A = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_A = output.images
_A = image[0, -3:, -3:, -1]
_A = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 621 |
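A note on the seeding pattern used throughout the tests above: two torch.Generator objects given the same manual seed produce identical draws, which is why the dict and tuple outputs of one pipeline can be compared slice for slice. Minimal sketch, pure torch:

import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
# identical seeds -> identical samples, so two pipeline runs are comparable
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))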
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 1 |
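The core pattern the script above exercises, reduced to a skeleton. This is a sketch, not the test itself: it assumes accelerate is installed and the script is launched on two processes (e.g. accelerate launch --num_processes 2).

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)        # keep the ragged last batch
model = accelerator.prepare(torch.nn.Linear(1, 1))
loader = accelerator.prepare(
    DataLoader(TensorDataset(torch.arange(3.0).unsqueeze(1)), batch_size=1)
)
with accelerator.join_uneven_inputs([model]):        # ranks may finish at different steps
    for (batch,) in loader:
        model(batch).sum().backward()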
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a_ = logging.getLogger(__name__)
def a__ ( __lowercase , __lowercase ) -> int:
_A = np.argmax(__lowercase , axis=1 )
return np.sum(outputs == labels )
def a__ ( __lowercase ) -> List[Any]:
with open(__lowercase , encoding="utf_8" ) as f:
_A = csv.reader(__lowercase )
_A = []
next(__lowercase ) # skip the first line
for line in tqdm(__lowercase ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
_A = []
for dataset in encoded_datasets:
_A = len(__lowercase )
_A = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_A = np.zeros((n_batch, 2) , dtype=np.intaa )
_A = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
_A = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(__lowercase ):
_A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_A = with_conta
_A = with_conta
_A = len(__lowercase ) - 1
_A = len(__lowercase ) - 1
_A = with_conta
_A = with_conta
_A = mc_label
_A = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__lowercase ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ) -> List[str]:
_A = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__lowercase , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=__lowercase , default="" )
parser.add_argument("--eval_dataset" , type=__lowercase , default="" )
parser.add_argument("--seed" , type=__lowercase , default=42 )
parser.add_argument("--num_train_epochs" , type=__lowercase , default=3 )
parser.add_argument("--train_batch_size" , type=__lowercase , default=8 )
parser.add_argument("--eval_batch_size" , type=__lowercase , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=__lowercase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=__lowercase , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=__lowercase , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=__lowercase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=__lowercase , default=6.25E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=__lowercase , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=__lowercase , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=__lowercase , default=0.01 )
parser.add_argument("--lm_coef" , type=__lowercase , default=0.9 )
parser.add_argument("--n_valid" , type=__lowercase , default=374 )
parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
_A = parser.parse_args()
print(__lowercase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_A = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_A = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(__lowercase , __lowercase ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_A = ["_start_", "_delimiter_", "_classify_"]
_A = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__lowercase )
_A = tokenizer.convert_tokens_to_ids(__lowercase )
_A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__lowercase ) )
model.to(__lowercase )
# Load and encode the datasets
def tokenize_and_encode(__lowercase ):
if isinstance(__lowercase , __lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowercase ) )
elif isinstance(__lowercase , __lowercase ):
return obj
return [tokenize_and_encode(__lowercase ) for o in obj]
logger.info("Encoding dataset..." )
_A = load_rocstories_dataset(args.train_dataset )
_A = load_rocstories_dataset(args.eval_dataset )
_A = (train_dataset, eval_dataset)
_A = tokenize_and_encode(__lowercase )
# Compute the max input length for the Transformer
_A = model.config.n_positions // 2 - 2
_A = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_A = min(__lowercase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_A = pre_process_datasets(__lowercase , __lowercase , __lowercase , *__lowercase )
_A , _A = tensor_datasets[0], tensor_datasets[1]
_A = TensorDataset(*__lowercase )
_A = RandomSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.train_batch_size )
_A = TensorDataset(*__lowercase )
_A = SequentialSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_A = args.max_steps
_A = args.max_steps // (len(__lowercase ) // args.gradient_accumulation_steps) + 1
else:
_A = len(__lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
_A = list(model.named_parameters() )
_A = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
_A = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
_A = AdamW(__lowercase , lr=args.learning_rate , eps=args.adam_epsilon )
_A = get_linear_schedule_with_warmup(
__lowercase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowercase )
if args.do_train:
_A , _A , _A = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
_A = 0
_A = 0
_A = tqdm(__lowercase , desc="Training" )
for step, batch in enumerate(__lowercase ):
_A = tuple(t.to(__lowercase ) for t in batch )
_A , _A , _A , _A = batch
_A = model(__lowercase , mc_token_ids=__lowercase , lm_labels=__lowercase , mc_labels=__lowercase )
_A = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_A = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_A = "Training loss: {:.2e} lr: {:.2e}".format(__lowercase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_A = model.module if hasattr(__lowercase , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_A = os.path.join(args.output_dir , __lowercase )
_A = os.path.join(args.output_dir , __lowercase )
torch.save(model_to_save.state_dict() , __lowercase )
model_to_save.config.to_json_file(__lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_A = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__lowercase )
if args.do_eval:
model.eval()
_A , _A = 0, 0
_A , _A = 0, 0
for batch in tqdm(__lowercase , desc="Evaluating" ):
_A = tuple(t.to(__lowercase ) for t in batch )
_A , _A , _A , _A = batch
with torch.no_grad():
_A , _A , _A , _A = model(
__lowercase , mc_token_ids=__lowercase , lm_labels=__lowercase , mc_labels=__lowercase )
_A = mc_logits.detach().cpu().numpy()
_A = mc_labels.to("cpu" ).numpy()
_A = accuracy(__lowercase , __lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_A = eval_loss / nb_eval_steps
_A = eval_accuracy / nb_eval_examples
_A = tr_loss / nb_tr_steps if args.do_train else None
_A = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
_A = os.path.join(args.output_dir , "eval_results.txt" )
with open(__lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , __lowercase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main() | 621 |
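The special-token handshake near the top of main() (add tokens, then resize the embedding matrix) is worth isolating. A standalone sketch using the same checkpoint name; running it downloads the pretrained weights:

from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tok = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
tok.add_tokens(["_start_", "_delimiter_", "_classify_"])
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.resize_token_embeddings(len(tok))  # allocate embedding rows for the three new tokens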
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
        if isinstance(sources , int ):
            _A = [sources]
        if isinstance(sinks , int ):
            _A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 1 |
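Sanity check on the example network: the only augmenting path from vertex 0 to vertex 3 is 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8), so the script should print a maximum flow of 6. An independent check, assuming networkx is available (the file itself does not use it):

import networkx as nx

g = nx.DiGraph()
g.add_weighted_edges_from([(0, 1, 7), (1, 2, 6), (2, 3, 8), (3, 0, 9)], weight="capacity")
assert nx.maximum_flow_value(g, 0, 3) == 6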
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'cvt'
def __init__( self : List[Any] , a__ : List[str]=3 , a__ : List[str]=[7, 3, 3] , a__ : List[Any]=[4, 2, 2] , a__ : str=[2, 1, 1] , a__ : str=[64, 1_92, 3_84] , a__ : Optional[int]=[1, 3, 6] , a__ : str=[1, 2, 10] , a__ : str=[4.0, 4.0, 4.0] , a__ : Optional[int]=[0.0, 0.0, 0.0] , a__ : Any=[0.0, 0.0, 0.0] , a__ : Tuple=[0.0, 0.0, 0.1] , a__ : Optional[int]=[True, True, True] , a__ : Optional[int]=[False, False, True] , a__ : Any=["dw_bn", "dw_bn", "dw_bn"] , a__ : int=[3, 3, 3] , a__ : Any=[1, 1, 1] , a__ : Any=[2, 2, 2] , a__ : List[Any]=[1, 1, 1] , a__ : Dict=[1, 1, 1] , a__ : Tuple=0.0_2 , a__ : str=1E-1_2 , **a__ : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a__ )
_A = num_channels
_A = patch_sizes
_A = patch_stride
_A = patch_padding
_A = embed_dim
_A = num_heads
_A = depth
_A = mlp_ratio
_A = attention_drop_rate
_A = drop_rate
_A = drop_path_rate
_A = qkv_bias
_A = cls_token
_A = qkv_projection_method
_A = kernel_qkv
_A = padding_kv
_A = stride_kv
_A = padding_q
_A = stride_q
_A = initializer_range
_A = layer_norm_eps | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
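The scaffold above is transformers' standard lazy-import layout: importing the package only registers names, and the heavy torch/tf/flax modules load on first attribute access. A minimal sketch of the mechanism; LazyModule here is illustrative, not the transformers class:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the module that defines it
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        # the real import happens only here, on first access
        return getattr(importlib.import_module(module_name), attr)

lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is imported only at this point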
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class snake_case ( unittest.TestCase):
def __init__( self : Optional[Any] , a__ : Optional[int] , a__ : Union[str, Any]=7 , a__ : Optional[Any]=3 , a__ : List[str]=30 , a__ : Union[str, Any]=4_00 , a__ : Tuple=True , a__ : Any=None , a__ : Tuple=0.9 , a__ : Dict=None , a__ : Dict=True , a__ : Tuple=[0.5, 0.5, 0.5] , a__ : Tuple=[0.5, 0.5, 0.5] , ) -> Any:
'''simple docstring'''
_A = size if size is not None else {"shortest_edge": 30}
_A = crop_size if crop_size is not None else {"height": 30, "width": 30}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize_and_center_crop
_A = size
_A = crop_pct
_A = crop_size
_A = do_normalize
_A = image_mean
_A = image_std
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = PoolFormerImageProcessor if is_vision_available() else None
def a_ ( self : Tuple ) -> int:
'''simple docstring'''
_A = PoolFormerImageProcessingTester(self )
@property
def a_ ( self : str ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(a__ , "size" ) )
self.assertTrue(hasattr(a__ , "crop_pct" ) )
self.assertTrue(hasattr(a__ , "do_normalize" ) )
self.assertTrue(hasattr(a__ , "image_mean" ) )
self.assertTrue(hasattr(a__ , "image_std" ) )
def a_ ( self : int ) -> Dict:
'''simple docstring'''
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
_A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_A = image_processing(a__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_A = image_processing(a__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def a_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_A = image_processing(a__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 621 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 1 |
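The shim above is the usual deprecation shape: subclass the replacement, warn in __init__, delegate everything else. A generic sketch with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # emit a FutureWarning, then behave exactly like NewProcessor
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(scale=2.0)  # still constructs, but warns the caller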
"""simple docstring"""
a_ = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
a_ = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
a_ = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
a_ = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
a_ = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
a_ = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
a_ = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
a_ = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
] | 621 |
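The lists above are hand-picked descending subsets of the 1,000 DDPM training timesteps, always ending at 0. An evenly spaced schedule of the same shape can be generated as a sketch; the hand-tuned lists above were not produced this way:

import numpy as np

def make_schedule(num_train_timesteps: int = 1000, num_inference_steps: int = 27):
    # descending, evenly spaced over [0, num_train_timesteps), ending at 0
    steps = np.linspace(0, num_train_timesteps - 1, num_inference_steps)
    return sorted({int(s) for s in steps}, reverse=True)

assert make_schedule()[0] == 999 and make_schedule()[-1] == 0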
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 1 |
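make_linear_from_emb above ties the LM head to the embedding matrix by sharing storage rather than copying. A quick standalone check of the weight-tying trick:

import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data        # same tensor storage, not a copy
emb.weight.data[0, 0] = 42.0
assert lm_head.weight[0, 0].item() == 42.0   # the head sees the update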
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 |
"""simple docstring"""
import numpy as np
def a__ ( __lowercase , __lowercase ) -> np.ndarray:
return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 1 |
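The np.where one-liner above is the ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise. A worked check with the names vector and alpha spelled out as in the row above:

import numpy as np

def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

out = elu(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
assert out[2] == 2.0 and np.isclose(out[0], np.exp(-1.0) - 1)  # exp(-1) - 1 ~ -0.632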
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
a_ = "bert-base-cased"
a_ = "google/pegasus-xsum"
a_ = [" Sam ate lunch today.", "Sams lunch ingredients."]
a_ = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
a_ = "patrickvonplaten/t5-tiny-random"
a_ = "sshleifer/bart-tiny-random"
a_ = "sshleifer/tiny-mbart"
a_ = "sshleifer/tiny-marian-en-de"
def a__ ( __lowercase , __lowercase ) -> Optional[int]:
_A = "\n".join(__lowercase )
Path(__lowercase ).open("w" ).writelines(__lowercase )
def a__ ( __lowercase ) -> Union[str, Any]:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__lowercase , f"""{split}.source""" ) , __lowercase )
_dump_articles(os.path.join(__lowercase , f"""{split}.target""" ) , __lowercase )
return tmp_dir
class snake_case ( _UpperCamelCase):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def a_ ( self : Tuple , a__ : List[Any] ) -> Any:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(a__ )
_A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_A = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES )
_A = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES )
_A = 4
_A = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_A , _A = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
_A = SeqaSeqDataset(
a__ , data_dir=a__ , type_path="train" , max_source_length=a__ , max_target_length=a__ , src_lang=a__ , tgt_lang=a__ , )
_A = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(a__ , a__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_A = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def a_ ( self : Dict , a__ : Tuple ) -> Optional[int]:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(a__ )
_A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_A = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES )
_A = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES )
_A = 4
_A = LegacySeqaSeqDataset(
a__ , data_dir=a__ , type_path="train" , max_source_length=20 , max_target_length=a__ , )
_A = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
_A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_A = tmp_dir.joinpath("train.source" ).open().readlines()
_A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(a__ , a__ , 1_28 , a__ )
_A = {x.name for x in tmp_dir.iterdir()}
_A = {x.name for x in save_dir.iterdir()}
_A = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(a__ ) < len(a__ )
assert len(a__ ) == 1
assert len(packed_examples[0] ) == sum(len(a__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_A , _A , _A = self._get_dataset(max_len=64 )
_A = 64
_A = ds.make_dynamic_sampler(a__ , required_batch_size_multiple=a__ )
_A = [len(a__ ) for x in batch_sampler]
assert len(set(a__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(a__ ) == len(a__ ) # no dropped or added examples
_A = DataLoader(a__ , batch_sampler=a__ , collate_fn=ds.collate_fn , num_workers=2 )
_A = []
_A = []
for batch in data_loader:
_A = batch["input_ids"].shape
_A = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_A = np.product(batch["input_ids"].shape )
num_src_per_batch.append(a__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(a__ )
assert num_src_per_batch[0] == max(a__ )
if failures:
raise AssertionError(F"""too many tokens in {len(a__ )} batches""" )
def a_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_A , _A , _A = self._get_dataset(max_len=5_12 )
_A = 2
_A = ds.make_sortish_sampler(a__ , shuffle=a__ )
_A = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 )
_A = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a__ )
_A = tokenizer.pad_token_id
def count_pad_tokens(a__ : int , a__ : str="input_ids" ):
return [batch[k].eq(a__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(a__ , k="labels" ) ) < sum(count_pad_tokens(a__ , k="labels" ) )
assert sum(count_pad_tokens(a__ ) ) < sum(count_pad_tokens(a__ ) )
assert len(a__ ) == len(a__ )
def a_ ( self : Union[str, Any] , a__ : Dict=10_00 , a__ : Tuple=1_28 ) -> str:
'''simple docstring'''
if os.getenv("USE_REAL_DATA" , a__ ):
_A = "examples/seq2seq/wmt_en_ro"
_A = max_len * 2 * 64
if not Path(a__ ).joinpath("train.len" ).exists():
save_len_file(a__ , a__ )
else:
_A = "examples/seq2seq/test_data/wmt_en_ro"
_A = max_len * 4
save_len_file(a__ , a__ )
_A = AutoTokenizer.from_pretrained(a__ )
_A = SeqaSeqDataset(
a__ , data_dir=a__ , type_path="train" , max_source_length=a__ , max_target_length=a__ , n_obs=a__ , )
return ds, max_tokens, tokenizer
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A , _A , _A = self._get_dataset()
_A = set(DistributedSortishSampler(a__ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=a__ ) )
_A = set(DistributedSortishSampler(a__ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=a__ ) )
assert idsa.intersection(a__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def a_ ( self : List[Any] , a__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(a__ , use_fast=a__ )
if tok_name == MBART_TINY:
_A = SeqaSeqDataset(
a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
_A = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_A = SeqaSeqDataset(
a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
_A = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(a__ ) == 1 if tok_name == BART_TINY else len(a__ ) == 0 | 621 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
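# Illustrative note (not from the source): T5 appends a single EOS per
# sequence, so this method yields `X </s>` for one input and `A </s> B </s>`
# for a pair -- e.g. [71, 1] and [71, 1, 72, 1] for hypothetical token ids
# 71/72 with eos_token_id == 1.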
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
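# Worked example (assumed sizes): with a 32_000-piece SentencePiece model and
# 100 extra ids, vocab_size is 32_100, so "<extra_id_0>" maps to
# 32_100 - 0 - 1 == 32_099 and "<extra_id_99>" maps to 32_000.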
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 1 |
"""simple docstring"""
a_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a_ = [{"type": "code", "content": INSTALL_CONTENT}]
a_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
} | 621 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow the experts are stored in one array, so it is split per expert
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # copy the fused qkv kernel in the layout Mesh-TensorFlow uses
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
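# A minimal numpy sketch (assumed layout) of the fused qkv split above: a
# kernel of shape (hidden, 3, heads, head_dim) is sliced into q/k/v, then
# flattened and transposed to the (out_features, in_features) layout that
# torch.nn.Linear.weight expects.
import numpy as np
_qkv = np.zeros((6, 3, 2, 4), dtype=np.float32)
_q = _qkv[:, 0, :, :].reshape(6, 2 * 4).transpose(1, 0)
assert _q.shape == (8, 6) # (out_features, in_features)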
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def a__ ( __lowercase ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def a__ ( __lowercase , __lowercase ) -> int:
_A = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
_A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
_A = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
_A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
_A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
_A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
_A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
_A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
_A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
_A = key.replace("image_encoder.module" , "flava.image_model" )
_A = key.replace("text_encoder.module" , "flava.text_model" )
_A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
_A = key.replace("mm_encoder.module" , "flava.multimodal_model" )
_A = key.replace("text_projection" , "flava.text_projection" )
_A = key.replace("image_projection" , "flava.image_projection" )
_A = value.float()
for key, value in codebook_state_dict.items():
_A = value
return upgrade
@torch.no_grad()
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Any:
if config_path is not None:
_A = FlavaConfig.from_pretrained(__lowercase )
else:
_A = FlavaConfig()
_A = FlavaForPreTraining(__lowercase ).eval()
_A = convert_dalle_checkpoint(__lowercase , __lowercase , save_checkpoint=__lowercase )
if os.path.exists(__lowercase ):
_A = torch.load(__lowercase , map_location="cpu" )
else:
_A = torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" )
_A = upgrade_state_dict(__lowercase , __lowercase )
hf_model.load_state_dict(__lowercase )
_A = hf_model.state_dict()
_A = count_parameters(__lowercase )
_A = count_parameters(__lowercase ) + count_parameters(__lowercase )
assert torch.allclose(__lowercase , __lowercase , atol=1E-3 )
hf_model.save_pretrained(__lowercase )
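# A minimal illustration (made-up key) of the chained renames above: each
# state-dict key flows through successive str.replace calls, and patterns
# that do not match simply leave the key untouched.
_k = "heads.cmd.itm_head.cls.bias"
_k = _k.replace("heads.cmd.itm_head.cls", "itm_head")
assert _k == "itm_head.bias"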
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
a_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path) | 621 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 1 |
"""simple docstring"""
import math
def a__ ( __lowercase , __lowercase ) -> float:
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__lowercase ) ) ** 2)
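# Worked example: Malus's law gives I = I0 * cos(theta)**2, so a beam of
# intensity 100.0 through an analyzer at 60 degrees transmits
# 100 * cos(60 deg)**2 == 25.0, e.g. with the function defined above:
assert math.isclose(a__(100.0, 60.0), 25.0)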
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law") | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
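# Illustrative note: this is the usual lazy-import pattern -- when torch is
# unavailable the model symbols are simply left out of the import structure,
# and _LazyModule only materializes submodules on first attribute access,
# keeping the top-level import cheap.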
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow the experts are stored in one array, so it is split per expert
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # copy the fused qkv kernel in the layout Mesh-TensorFlow uses
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow kernels are stored transposed
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
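# Illustrative note (assumed intent): each yielded item stops the stream
# with probability p_stop, so the length is roughly geometric with mean
# ~1 / p_stop, capped at max_length -- a cheap stand-in for an
# unknown-length IterableDataset.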
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not of the total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but the number of batches
# is a multiple of num_processes.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size and the number of batches
# is not a multiple of num_processes.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
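# A minimal standalone sketch (not accelerate's implementation) of the
# round-robin sharding checked above: with 2 processes, shard i simply
# takes every 2nd batch, starting at batch i.
_batches = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]
assert _batches[0::2] == [[0, 1, 2], [6, 7, 8]]
assert _batches[1::2] == [[3, 4, 5], [9, 10, 11]]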
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not of the total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but the number of batches
# is a multiple of num_processes.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size and the number of batches
# is not a multiple of num_processes.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
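# Illustrative note: with drop_last=False the shards wrap around to the
# start of the stream to fill the last batch, which is why the reference
# list is tiled above until it covers the observed items.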
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
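# Minimal sketch of the skipping semantics verified above: skipping the
# first 2 batches of size 4 over range(16) leaves exactly the last two.
_vals = list(range(16))
_kept = [_vals[i : i + 4] for i in range(0, 16, 4)][2:]
assert _kept == [[8, 9, 10, 11], [12, 13, 14, 15]]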
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 1 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def a__ ( __lowercase , __lowercase = 16 ) -> Tuple:
_A = AutoTokenizer.from_pretrained("bert-base-cased" )
_A = load_dataset("glue" , "mrpc" )
def tokenize_function(__lowercase ):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowercase , max_length=__lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_A = datasets.map(
__lowercase , batched=__lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels', which is the column name the models of the
# transformers library expect
_A = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_A = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_A = 16
elif accelerator.mixed_precision != "no":
_A = 8
else:
_A = None
return tokenizer.pad(
__lowercase , padding="longest" , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
_A = DataLoader(
tokenized_datasets["train"] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase )
_A = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase )
return train_dataloader, eval_dataloader
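# Illustrative note: pad_to_multiple_of keeps padded lengths aligned to
# sizes that mixed-precision kernels favor (16 for fp8, 8 for fp16/bf16),
# while the fixed max_length of 128 on TPU avoids recompiling for every
# new sequence length.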
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def a__ ( __lowercase , __lowercase ) -> str:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __lowercase ) == "1":
_A = 2
# Initialize accelerator
_A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A = config["lr"]
_A = int(config["num_epochs"] )
_A = int(config["seed"] )
_A = int(config["batch_size"] )
_A = evaluate.load("glue" , "mrpc" )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowercase )
def inner_training_loop(__lowercase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_A = model.to(accelerator.device )
# Instantiate optimizer
_A = AdamW(params=model.parameters() , lr=__lowercase )
_A , _A = get_dataloaders(__lowercase , __lowercase )
# Instantiate scheduler
_A = get_linear_schedule_with_warmup(
optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A , _A , _A , _A , _A = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Now we train the model
for epoch in range(__lowercase ):
model.train()
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_A = model(**__lowercase )
_A = outputs.loss
accelerator.backward(__lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_A = model(**__lowercase )
_A = outputs.logits.argmax(dim=-1 )
_A , _A = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__lowercase , references=__lowercase , )
_A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowercase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
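# A rough sketch (not accelerate's actual implementation) of what
# @find_executable_batch_size does: retry the wrapped function, halving
# the batch size whenever an out-of-memory error is raised.
def _find_executable_batch_size_sketch(fn, starting_batch_size=128):
    bs = starting_batch_size
    while bs > 0:
        try:
            return fn(bs)
        except RuntimeError as err:  # the real decorator matches CUDA OOM messages
            if "out of memory" not in str(err):
                raise
            bs //= 2
    raise RuntimeError("no executable batch size found")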
def a__ ( ) -> int:
_A = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__lowercase , default=__lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_A = parser.parse_args()
_A = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__lowercase , __lowercase )
if __name__ == "__main__":
main() | 621 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 1 |
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def a__ ( __lowercase ) -> str:
for i in range(0 , __lowercase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def a__ ( __lowercase ) -> Optional[int]:
for i in range(__lowercase , 0 , -1 ):
for _ in range(__lowercase , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def a__ ( __lowercase ) -> Any:
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(__lowercase ) # upper half
reverse_floyd(__lowercase ) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
a_ = 1
while K:
a_ = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
a_ = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...") | 621 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
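# the cache file name encodes the split, tokenizer class, max sequence length and task, so features built with different settings do not collide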
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case ( unittest.TestCase):
@slow
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
_A = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(a__ )
from datasets import load_dataset
_A = load_dataset("nielsr/rvlcdip-demo" )
_A = dataset["train"][0]["image"].convert("RGB" )
_A = image_processor(a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
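# RVL-CDIP is a 16-class document classification benchmark, hence logits of shape (1, 16)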
_A = torch.Size((1, 16) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=a__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1E-4 ) ) | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
# BFS on the residual graph: record each visited node's parent and return True if the sink t is reachable from the source.
_A = [False] * len(__lowercase )
_A = []
queue.append(__lowercase )
_A = True
while queue:
_A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_A = True
_A = u
return visited[t]
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
# This array is filled by BFS to store the augmenting path
_A = [-1] * (len(__lowercase ))
_A = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_A = float("Inf" )
_A = sink
while s != source:
# Find the minimum residual capacity along the selected path
_A = min(__lowercase , graph[parent[s]][s] )
_A = parent[s]
max_flow += path_flow
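# Walk the augmenting path a second time, draining forward capacity and crediting the reverse edges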
_A = sink
while v != source:
_A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_A = parent[v]
return max_flow
a_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
a_ , a_ = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case ( unittest.TestCase):
@property
def a_ ( self : int ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def a_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_A = self.dummy_uncond_unet
_A = ScoreSdeVeScheduler()
_A = ScoreSdeVePipeline(unet=a__ , scheduler=a__ )
sde_ve.to(a__ )
sde_ve.set_progress_bar_config(disable=a__ )
_A = torch.manual_seed(0 )
_A = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=a__ ).images
_A = torch.manual_seed(0 )
_A = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=a__ , return_dict=a__ )[
0
]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_A = "google/ncsnpp-church-256"
_A = UNetaDModel.from_pretrained(a__ )
_A = ScoreSdeVeScheduler.from_pretrained(a__ )
_A = ScoreSdeVePipeline(unet=a__ , scheduler=a__ )
sde_ve.to(a__ )
sde_ve.set_progress_bar_config(disable=a__ )
_A = torch.manual_seed(0 )
_A = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_A = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 621 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = state_dict.pop(__lowercase )
_A = val
def a__ ( __lowercase ) -> List[str]:
_A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_A = value
else:
_A = value
return new_state_dict
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A = ""
if is_panoptic:
_A = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:256, :]
_A = in_proj_bias[:256]
_A = in_proj_weight[256:512, :]
_A = in_proj_bias[256:512]
_A = in_proj_weight[-256:, :]
_A = in_proj_bias[-256:]
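# the hidden size is 256, so rows 0-255 of the fused in-projection are the query weights, 256-511 the keys, and the last 256 the values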
def a__ ( ) -> int:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
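# panoptic checkpoints predict 250 classes; plain detection uses the 91 COCO categories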
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 1 |
"""simple docstring"""
import numpy as np
import datasets
a_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
a_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
a_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case ( datasets.Metric):
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def a_ ( self : Tuple , a__ : List[Any] , a__ : List[str] ) -> Dict:
'''simple docstring'''
_A = np.array(a__ )
_A = np.array(a__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_A = X - np.mean(a__ )
_A = np.cov(reference_distribution.T )
try:
_A = np.linalg.inv(a__ )
except np.linalg.LinAlgError:
_A = np.linalg.pinv(a__ )
_A = np.dot(a__ , a__ )
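# each diagonal entry below is (x - mu) @ inv_cov @ (x - mu).T, i.e. the squared Mahalanobis distance of one row of X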
_A = np.dot(a__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 621 |
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
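# Partition around the pivot a[left_index]: elements smaller than the pivot move to its left; returns the pivot's final index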
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the leftmost bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 621 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case ( _UpperCamelCase):
def a_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a__ , "tf_padding" ) )
self.parent.assertTrue(hasattr(a__ , "depth_multiplier" ) )
class snake_case :
def __init__( self : str , a__ : Tuple , a__ : Any=13 , a__ : Any=3 , a__ : Optional[int]=32 , a__ : int=0.2_5 , a__ : str=8 , a__ : Union[str, Any]=8 , a__ : str=6 , a__ : Optional[int]=32 , a__ : List[Any]=True , a__ : Optional[Any]=True , a__ : str=True , a__ : Union[str, Any]="relu6" , a__ : str=12_80 , a__ : str=0.1 , a__ : int=0.0_2 , a__ : Any=True , a__ : List[Any]=True , a__ : Union[str, Any]=10 , a__ : int=None , ) -> Tuple:
'''simple docstring'''
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = depth_multiplier
_A = depth_divisible_by
_A = min_depth
_A = expand_ratio
_A = tf_padding
_A = output_stride
_A = first_layer_is_expansion
_A = finegrained_output
_A = hidden_act
_A = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
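# with finegrained output the final feature width stays fixed; otherwise it scales with the depth multiplier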
_A = classifier_dropout_prob
_A = use_labels
_A = is_training
_A = num_labels
_A = initializer_range
_A = scope
def a_ ( self : Any ) -> Tuple:
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def a_ ( self : Union[str, Any] , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : Union[str, Any] ) -> str:
'''simple docstring'''
_A = MobileNetVaModel(config=a__ )
model.to(a__ )
model.eval()
_A = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def a_ ( self : List[str] , a__ : Any , a__ : List[str] , a__ : List[Any] , a__ : Any ) -> Optional[Any]:
'''simple docstring'''
_A = self.num_labels
_A = MobileNetVaForImageClassification(a__ )
model.to(a__ )
model.eval()
_A = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self : Optional[int] , a__ : int , a__ : str , a__ : Tuple , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = self.num_labels
_A = MobileNetVaForSemanticSegmentation(a__ )
model.to(a__ )
model.eval()
_A = model(a__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_A = model(a__ , labels=a__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a_ ( self : str ) -> int:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = MobileNetVaModelTester(self )
_A = MobileNetVaConfigTester(self , config_class=a__ , has_text_modality=a__ )
def a_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def a_ ( self : Any ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def a_ ( self : str ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def a_ ( self : int ) -> List[str]:
'''simple docstring'''
pass
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
def check_hidden_states_output(a__ : Optional[int] , a__ : List[str] , a__ : List[str] ):
_A = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(a__ , a__ ) )
_A = outputs.hidden_states
_A = 16
self.assertEqual(len(a__ ) , a__ )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(a__ , a__ , a__ )
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__ )
@slow
def a_ ( self : List[str] ) -> Any:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = MobileNetVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def a__ ( ) -> Union[str, Any]:
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
# verify the logits
_A = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , a__ )
_A = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@slow
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
_A = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_A = model.to(a__ )
_A = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
] , device=a__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1E-4 ) ) | 621 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
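# number of log-mel frames produced by one full chunk of audio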
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
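# clamp values to 8 below the global max, then shift and scale into roughly [-1, 1]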
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 1 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
if index == r:
for j in range(__lowercase ):
print(data[j] , end=" " )
print(" " )
return
# When there are no more elements left to put in data[]
if i >= n:
return
# current is included, put next at next location
_A = arr[i]
combination_util(__lowercase , __lowercase , __lowercase , index + 1 , __lowercase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
# A temporary array to store all combination one by one
_A = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowercase , __lowercase , __lowercase , 0 , __lowercase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
a_ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu | 621 |
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
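# odd combined length: take the single middle element; even: average the two middle elements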
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a_ = "sshleifer/bart-tiny-random"
a_ = "patrickvonplaten/t5-tiny-random"
@require_torch
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return AutoConfig.from_pretrained(a__ )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
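# e and d are the numbers of encoder and decoder layers copied from the teacher into the student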
_A , *_A = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_A , *_A = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=a__ )
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A , *_A = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=a__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def a_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
_A , *_A = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def a_ ( self : str ) -> int:
'''simple docstring'''
with self.assertRaises(a__ ):
create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=a__ , d=a__ ) | 621 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
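# ties the text model's encoder width to the vision hidden size (the text model cross-attends to image features)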
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
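# yields counting integers, stopping early with probability p_stop after each item (at most max_length items)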
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size and does not contain a
# full multiple of num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size and does not contain a
# full multiple of num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , i , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=i , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
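# The helper above checks that interleaving the shards chunk-by-chunk (one
# shard_batch_size slice per process, in process order) reproduces the reference
# stream; when drop_last=False the reference is allowed to wrap around, mirroring
# how IterableDatasetShard cycles back to the beginning to fill its last batch.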
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'donut-swin'
__UpperCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Union[str, Any] , a__ : Any=2_24 , a__ : Optional[int]=4 , a__ : Dict=3 , a__ : str=96 , a__ : Optional[Any]=[2, 2, 6, 2] , a__ : Optional[Any]=[3, 6, 12, 24] , a__ : Union[str, Any]=7 , a__ : Any=4.0 , a__ : Tuple=True , a__ : List[str]=0.0 , a__ : Dict=0.0 , a__ : Any=0.1 , a__ : Dict="gelu" , a__ : Optional[Any]=False , a__ : List[Any]=0.0_2 , a__ : str=1E-5 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = image_size
_A = patch_size
_A = num_channels
_A = embed_dim
_A = depths
_A = len(a__ )
_A = num_heads
_A = window_size
_A = mlp_ratio
_A = qkv_bias
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = drop_path_rate
_A = hidden_act
_A = use_absolute_embeddings
_A = layer_norm_eps
_A = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
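# e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2] (four stages),
# hidden_size = 96 * 2 ** 3 = 768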
_A = int(embed_dim * 2 ** (len(a__ ) - 1) ) | 621 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
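# the six-way update below was collapsed by the renaming; with readable names the
# extended-Euclid step is:
# v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3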
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 1 |
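# A readable, self-contained sketch of the two helpers above; `gcd` and
# `find_mod_inverse` are assumed names for the obfuscated `a__` functions.
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b

def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m

assert (7 * find_mod_inverse(7, 26)) % 26 == 1  # 7 * 15 == 105 == 4 * 26 + 1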
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a__ ( ) -> Tuple:
_A = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
_A = Dataset.from_dict(__lowercase )
return dataset
class snake_case ( _UpperCamelCase):
def a_ ( self : Any ) -> Any:
'''simple docstring'''
_A = get_dataset()
_A = make_duplicate_clusters(a__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_A = get_dataset()
_A , _A = deduplicate_dataset(a__ )
self.assertEqual(len(a__ ) , 2 )
print(a__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a__ ) | 621 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
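# join_uneven_inputs delegates to torch.distributed's Join context manager for DDP
# modules: process 1 exhausts its single batch while process 0 still runs batches 0
# and 1, and the join context keeps the gradient all-reduces from deadlocking.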
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 1 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> bool:
return num_a ^ num_b < 0
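# In two's complement the sign occupies the top bit, so the XOR of two ints is
# negative exactly when their signs differ, e.g. 1 ^ -1 == -2 < 0 but 3 ^ 7 == 4 >= 0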
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
if isinstance(sources , int ):
_A = [sources]
if isinstance(sinks , int ):
_A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make a fake vertex if there is more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
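# relabel lifts an overflowing vertex to one more than its lowest neighbour that
# still has residual capacity, making at least one further push admissible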
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
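# sanity note for this example network: the only augmenting path is 0 -> 1 -> 2 -> 3
# with capacities 7, 6 and 8, so the expected maximum flow is min(7, 6, 8) = 6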
print(f'''maximum flow is {maximum_flow}''') | 621 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
class snake_case ( _UpperCamelCase):
def __init__( self : int , a__ : PriorTransformer , a__ : CLIPVisionModel , a__ : CLIPImageProcessor , a__ : HeunDiscreteScheduler , a__ : ShapERenderer , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=a__ , image_encoder=a__ , image_processor=a__ , scheduler=a__ , renderer=a__ , )
def a_ ( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Dict , a__ : List[Any] , a__ : List[str] , a__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if latents is None:
_A = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_A = latents.to(a__ )
_A = latents * scheduler.init_noise_sigma
return latents
def a_ ( self : Optional[int] , a__ : Tuple=0 ) -> List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_A = torch.device(F"""cuda:{gpu_id}""" )
_A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
@property
def a_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(a__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a_ ( self : Tuple , a__ : str , a__ : Union[str, Any] , a__ : Tuple , a__ : Any , ) -> List[str]:
'''simple docstring'''
if isinstance(a__ , a__ ) and isinstance(image[0] , torch.Tensor ):
_A = torch.cat(a__ , axis=0 ) if image[0].ndim == 4 else torch.stack(a__ , axis=0 )
if not isinstance(a__ , torch.Tensor ):
_A = self.image_processor(a__ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_A = image.to(dtype=self.image_encoder.dtype , device=a__ )
_A = self.image_encoder(a__ )["last_hidden_state"]
_A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_A = image_embeds.repeat_interleave(a__ , dim=0 )
if do_classifier_free_guidance:
_A = torch.zeros_like(a__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self : List[Any] , a__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , a__ : int = 1 , a__ : int = 25 , a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a__ : Optional[torch.FloatTensor] = None , a__ : float = 4.0 , a__ : int = 64 , a__ : Optional[str] = "pil" , a__ : bool = True , ) -> Tuple:
'''simple docstring'''
if isinstance(a__ , PIL.Image.Image ):
_A = 1
elif isinstance(a__ , torch.Tensor ):
_A = image.shape[0]
elif isinstance(a__ , a__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_A = len(a__ )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(a__ )}""" )
_A = self._execution_device
_A = batch_size * num_images_per_prompt
_A = guidance_scale > 1.0
_A = self._encode_image(a__ , a__ , a__ , a__ )
# prior
self.scheduler.set_timesteps(a__ , device=a__ )
_A = self.scheduler.timesteps
_A = self.prior.config.num_embeddings
_A = self.prior.config.embedding_dim
_A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , a__ , a__ , a__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_A = latents.reshape(latents.shape[0] , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = self.scheduler.scale_model_input(a__ , a__ )
_A = self.prior(
a__ , timestep=a__ , proj_embedding=a__ , ).predicted_image_embedding
# remove the variance
_A , _A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_A , _A = noise_pred.chunk(2 )
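# classifier-free guidance combine: start from the unconditional prediction and
# move toward the image-conditioned one, scaled by guidance_scale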
_A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_A = self.scheduler.step(
a__ , timestep=a__ , sample=a__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=a__ )
_A = []
for i, latent in enumerate(a__ ):
_A = self.renderer.decode(
latent[None, :] , a__ , size=a__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(a__ )
_A = torch.stack(a__ )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
_A = images.cpu().numpy()
if output_type == "pil":
_A = [self.numpy_to_pil(a__ ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=a__ ) | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
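# This follows the standard transformers lazy-import layout: names are declared in
# _import_structure, re-imported under TYPE_CHECKING for static type checkers, and
# only materialized on first attribute access through _LazyModule below.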
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case ( unittest.TestCase):
def a_ ( self : Dict ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ )
return image
@property
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a__ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def a_ ( self : str ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
return CLIPTextModel(a__ )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="v_prediction" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
# wire the pipeline with a DDPM low-res scheduler and a DDIM (v-prediction) scheduler
_A = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=3_50 , )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "A painting of a squirrel eating a burger"
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_A = output.images
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=a__ , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_A = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> str:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="v_prediction" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
# wire the pipeline with a DDPM low-res scheduler and a DDIM (v-prediction) scheduler
_A = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=3_50 , )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "A painting of a squirrel eating a burger"
_A = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_A = output.images
assert image.shape[0] == 2
_A = torch.Generator(device=a__ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_A = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="v_prediction" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_A = unet.half()
_A = text_encoder.half()
# wire the pipeline with a DDPM low-res scheduler and a DDIM (v-prediction) scheduler
_A = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=3_50 , )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = "A painting of a squirrel eating a burger"
_A = torch.manual_seed(0 )
_A = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ).images
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_A = "stabilityai/stable-diffusion-x4-upscaler"
_A = StableDiffusionUpscalePipeline.from_pretrained(a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "a cat sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_A = "stabilityai/stable-diffusion-x4-upscaler"
_A = StableDiffusionUpscalePipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "a cat sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_A = "stabilityai/stable-diffusion-x4-upscaler"
_A = StableDiffusionUpscalePipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "a cat sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , generator=a__ , num_inference_steps=5 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 621 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class snake_case :
__UpperCamelCase = MBartConfig
__UpperCamelCase = {}
__UpperCamelCase = 'gelu'
def __init__( self : Tuple , a__ : Optional[int] , a__ : int=13 , a__ : Dict=7 , a__ : List[Any]=True , a__ : Optional[Any]=False , a__ : Dict=99 , a__ : Union[str, Any]=32 , a__ : Tuple=2 , a__ : Union[str, Any]=4 , a__ : Tuple=37 , a__ : List[str]=0.1 , a__ : Dict=0.1 , a__ : List[str]=20 , a__ : Dict=2 , a__ : Tuple=1 , a__ : Any=0 , ) -> Tuple:
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = eos_token_id
_A = pad_token_id
_A = bos_token_id
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A = tf.concat([input_ids, eos_tensor] , axis=1 )
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A = prepare_mbart_inputs_dict(a__ , a__ , a__ )
return config, inputs_dict
def a_ ( self : Tuple , a__ : List[Any] , a__ : Dict ) -> Any:
'''simple docstring'''
_A = TFMBartModel(config=a__ ).get_decoder()
_A = inputs_dict["input_ids"]
_A = input_ids[:1, :]
_A = inputs_dict["attention_mask"][:1, :]
_A = inputs_dict["head_mask"]
_A = 1
# first forward pass
_A = model(a__ , attention_mask=a__ , head_mask=a__ , use_cache=a__ )
_A , _A = outputs.to_tuple()
_A = past_key_values[1]
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]:
if attention_mask is None:
_A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
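# the first decoder position is deliberately left unmasked above: the tester sets
# decoder_start_token_id to pad_token_id, so masking pads naively would hide it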
if head_mask is None:
_A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__UpperCamelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
def a_ ( self : Any , a__ : List[str] , a__ : str , a__ : Tuple , a__ : List[Any] , a__ : int ) -> Optional[Any]:
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = TFMBartModelTester(self )
_A = ConfigTester(self , config_class=a__ )
def a_ ( self : int ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case ( unittest.TestCase):
__UpperCamelCase = [
' UN Chief Says There Is No Military Solution in Syria',
]
__UpperCamelCase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
__UpperCamelCase = 'facebook/mbart-large-en-ro'
@cached_property
def a_ ( self : int ) -> Dict:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def a_ ( self : str , **a__ : Any ) -> Union[str, Any]:
'''simple docstring'''
_A = self.translate_src_text(**a__ )
self.assertListEqual(self.expected_text , a__ )
def a_ ( self : Dict , **a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = self.tokenizer(self.src_text , **a__ , return_tensors="tf" )
_A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_A = self.tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
return generated_words
@slow
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
self._assert_generated_batch_equal_expected() | 621 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
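# weight tying: the returned bias-free Linear (vocab_size x emb_size) reuses the
# shared embedding matrix as the LM head's output projection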
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This contrasts with taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to
# ~3MB in total for all files. The latter approach is used by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
a_ = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
a_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
a_ = dict(zip(vocab, range(len(vocab))))
a_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = Path(tmpdirname)
a_ = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
a_ = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
a_ = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
a_ = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
a_ = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
a_ = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
a_ = tokenizer(["Making tiny model"], return_tensors="pt")
a_ = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 621 |
"""simple docstring"""
import numpy as np
def a__ ( vector , alpha ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
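# Illustrative only (not part of the original script): the function above is the
# ELU activation, returning x where x > 0 and alpha * (exp(x) - 1) elsewhere, e.g.
#     a__(np.array([1.0, -1.0]), 1.0)  ->  array([ 1.        , -0.63212056])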
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 1 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class snake_case ( _UpperCamelCase):
def __lt__( self : List[Any] , a__ : Any ) -> Tuple:
'''simple docstring'''
return self[-1] < other[-1]
def __eq__( self : List[Any] , a__ : Dict ) -> Dict:
'''simple docstring'''
return self[-1] == other[-1]
def a__ ( __lowercase ) -> list:
_A = []
# sort into stacks
for element in collection:
_A = Stack([element] )
_A = bisect_left(__lowercase , __lowercase )
if i != len(__lowercase ):
stacks[i].append(__lowercase )
else:
stacks.append(__lowercase )
# use a heap-based merge to merge the stacks efficiently
_A = merge(*(reversed(__lowercase ) for stack in stacks) )
return collection
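# Worked example (illustrative): for [5, 3, 4, 1, 2] the piles grow to
# [[5, 3, 1], [4, 2]]; each element lands on the leftmost pile whose top is
# >= the element (found via bisect_left), or starts a new pile. Reversing each
# pile yields the sorted runs [1, 3, 5] and [2, 4], which the heap merge
# combines into [1, 2, 3, 4, 5].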
if __name__ == "__main__":
a_ = input("Enter numbers separated by a comma:\n").strip()
a_ = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted)) | 621 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
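    # The converter below maps sentinel tokens to the top of the vocabulary:
    # "<extra_id_N>" becomes id vocab_size - N - 1, so with a 32100-token vocab
    # (the usual T5 size, quoted here only as an example) "<extra_id_0>" -> 32099.
    # Everything else falls through to the sentencepiece model.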
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 1 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a_ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
a_ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
a_ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def a__ ( __lowercase , __lowercase ) -> Union[str, Any]:
return float((preds == labels).mean() )
def a__ ( __lowercase , __lowercase , __lowercase="binary" ) -> Optional[Any]:
_A = simple_accuracy(__lowercase , __lowercase )
_A = float(fa_score(y_true=__lowercase , y_pred=__lowercase , average=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a__ ( __lowercase , __lowercase ) -> List[str]:
_A = {}
for id_pred, label in zip(__lowercase , __lowercase ):
_A = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_A = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_A = [(pred, label)]
_A , _A = [], []
for question, preds_labels in question_map.items():
_A , _A = zip(*__lowercase )
_A = fa_score(y_true=__lowercase , y_pred=__lowercase , average="macro" )
fas.append(__lowercase )
_A = int(sum(pred == label for pred, label in preds_labels ) == len(__lowercase ) )
ems.append(__lowercase )
_A = float(sum(__lowercase ) / len(__lowercase ) )
_A = sum(__lowercase ) / len(__lowercase )
_A = float(fa_score(y_true=__lowercase , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
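# In the multirc evaluation above, predictions are grouped per question:
# "f1_m" averages per-question macro-F1 scores, "exact_match" is the fraction
# of questions whose answers are all predicted correctly, and "f1_a" is a
# single F1 computed over every answer prediction at once.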
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case ( datasets.Metric):
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def a_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def a_ ( self : str , a__ : Dict , a__ : Any ) -> int:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(a__ , a__ )}
elif self.config_name == "cb":
return acc_and_fa(a__ , a__ , fa_avg="macro" )
elif self.config_name == "record":
_A = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
_A = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(a__ , a__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(a__ , a__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(a__ , a__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" ) | 621 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
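# Rough shape of the conversion below (descriptive only): TF kernels are laid
# out as (in_features, out_features) while torch.nn.Linear stores (out, in),
# hence the recurring transpose([1, 0]); Adam optimizer slots (/adam_m, /adam_v)
# are skipped; and the fused qkv kernel is split into separate q/k/v weights.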
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # goes into an nn.Sequential with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 1 |
"""simple docstring"""
from typing import Any
class snake_case :
def __init__( self : Optional[int] , a__ : Any ) -> str:
'''simple docstring'''
_A = data
_A = None
class snake_case :
def __init__( self : Optional[int] ) -> str:
'''simple docstring'''
_A = None
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
_A = self.head
while temp is not None:
print(temp.data , end=" " )
_A = temp.next
print()
def a_ ( self : Optional[Any] , a__ : Any ) -> Dict:
'''simple docstring'''
_A = Node(a__ )
_A = self.head
_A = new_node
def a_ ( self : List[str] , a__ : Optional[Any] , a__ : List[Any] ) -> Tuple:
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
_A = self.head
while node_a is not None and node_a.data != node_data_a:
_A = node_a.next
_A = self.head
while node_a is not None and node_a.data != node_data_a:
_A = node_a.next
if node_a is None or node_a is None:
return
_A , _A = node_a.data, node_a.data
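        # Note: the swap above exchanges the payloads of the two matching nodes
        # rather than re-linking them, which keeps the pointer bookkeeping trivial.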
if __name__ == "__main__":
a_ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list() | 621 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
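# A 6-layer student is initialized from teacher layers [0, 2, 4, 7, 9, 11],
# copied into consecutive student positions counted by std_idx.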
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 1 |
"""simple docstring"""
a_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
a_ = frozenset(["prompt", "negative_prompt"])
a_ = frozenset([])
a_ = frozenset(["image"])
a_ = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
a_ = frozenset(["image"])
a_ = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
a_ = frozenset(["prompt", "image", "negative_prompt"])
a_ = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
a_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
a_ = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
a_ = frozenset(["image", "mask_image"])
a_ = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
a_ = frozenset(["example_image", "image", "mask_image"])
a_ = frozenset(["class_labels"])
a_ = frozenset(["class_labels"])
a_ = frozenset(["batch_size"])
a_ = frozenset([])
a_ = frozenset(["batch_size"])
a_ = frozenset([])
a_ = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
a_ = frozenset(["prompt", "negative_prompt"])
a_ = frozenset(["input_tokens"])
a_ = frozenset(["input_tokens"]) | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
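# Standard lazy-import layout: the structure below maps submodule names to the
# symbols they export. Under TYPE_CHECKING the imports run eagerly for type
# checkers; at runtime the module is replaced by a _LazyModule that resolves
# each symbol on first access.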
a_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class snake_case ( nn.Module):
def __init__( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_A = nn.Linear(3 , 4 )
_A = nn.BatchNormad(4 )
_A = nn.Linear(4 , 5 )
def a_ ( self : str , a__ : str ) -> Dict:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(a__ ) ) )
class snake_case ( _UpperCamelCase):
def a_ ( self : List[str] , a__ : int , *a__ : Tuple , **a__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class snake_case ( _UpperCamelCase):
def a_ ( self : Optional[int] , a__ : int , a__ : Tuple ) -> int:
'''simple docstring'''
return output + 1
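# The two hooks above bracket a module's forward pass: the pre-forward hook
# increments the first positional input before forward runs, and the
# post-forward hook increments the output afterwards. The tests below cover
# replacement semantics and chaining via SequentialHook.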
class snake_case ( unittest.TestCase):
def a_ ( self : int ) -> List[str]:
'''simple docstring'''
_A = ModelForTest()
_A = ModelHook()
add_hook_to_module(a__ , a__ )
self.assertEqual(test_model._hf_hook , a__ )
self.assertTrue(hasattr(a__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(a__ )
self.assertFalse(hasattr(a__ , "_hf_hook" ) )
self.assertFalse(hasattr(a__ , "_old_forward" ) )
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_A = ModelForTest()
_A = ModelHook()
add_hook_to_module(a__ , a__ )
add_hook_to_module(a__ , a__ , append=a__ )
self.assertEqual(isinstance(test_model._hf_hook , a__ ) , a__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(a__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(a__ )
self.assertFalse(hasattr(a__ , "_hf_hook" ) )
self.assertFalse(hasattr(a__ , "_old_forward" ) )
def a_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(x + 1 )
_A = test_model(x + 2 )
_A = PreForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
_A = PreForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_A = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
assert torch.allclose(a__ , a__ , atol=1E-5 )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(a__ )
_A = PostForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
_A = PostForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_A = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
assert torch.allclose(a__ , output + 2 , atol=1E-5 )
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(a__ )
_A = PostForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_A = True
_A = test_model(a__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(a__ , AlignDevicesHook(io_same_device=a__ ) )
_A = torch.randn(2 , 3 ).to(0 )
_A = model(a__ )
self.assertEqual(output.device , torch.device(0 ) )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_A = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
_A = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_A = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(a__ , execution_device=a__ , offload=a__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(a__ )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a__ , execution_device=a__ , offload=a__ , offload_buffers=a__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_A = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
a__ , execution_device=a__ , offload=a__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(a__ )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a__ , execution_device=a__ , offload=a__ , weights_map=model.state_dict() , offload_buffers=a__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) | 621 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
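# The iterable dataset above yields 0, 1, 2, ... and stops after each step with
# probability p_stop (capped at max_length), so shard lengths are unpredictable;
# that is exactly what IterableDatasetShard has to even out across processes.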
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but the number of
# batches is a multiple of num_processes.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size and the number of
# batches is not a multiple of num_processes.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but the number of
# batches is a multiple of num_processes.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size and the number of
# batches is not a multiple of num_processes.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 1 |
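The expected shard lists in the tests above all follow one pattern: sub-batch i of the split batch stream goes to process i % num_processes, and with even_batches disabled the trailing shards are simply shorter. A plain-Python sketch of that distribution rule (an illustration of what the assertions encode, not accelerate's actual implementation):

def shard_batches(batches, num_processes):
    # round-robin assignment: batch i goes to process i % num_processes
    return [batches[rank::num_processes] for rank in range(num_processes)]

sub_batches = [[2 * i, 2 * i + 1] for i in range(11)]  # 22 samples split into sub-batches of 2
shards = shard_batches(sub_batches, 2)
assert shards[0] == [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]]
assert shards[1] == [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]]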
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'vit_mae'
def __init__( self : Optional[int] , a__ : Union[str, Any]=7_68 , a__ : Optional[int]=12 , a__ : Any=12 , a__ : List[Any]=30_72 , a__ : Optional[int]="gelu" , a__ : Optional[Any]=0.0 , a__ : Any=0.0 , a__ : Optional[Any]=0.0_2 , a__ : Any=1E-1_2 , a__ : List[str]=2_24 , a__ : int=16 , a__ : Union[str, Any]=3 , a__ : Dict=True , a__ : Optional[Any]=16 , a__ : List[Any]=5_12 , a__ : List[Any]=8 , a__ : Tuple=20_48 , a__ : Optional[int]=0.7_5 , a__ : int=False , **a__ : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = qkv_bias
_A = decoder_num_attention_heads
_A = decoder_hidden_size
_A = decoder_num_hidden_layers
_A = decoder_intermediate_size
_A = mask_ratio
_A = norm_pix_loss | 621 |
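The class above is an obfuscated copy of the ViT MAE configuration; with the public transformers API the equivalent usage is a one-liner (a brief sketch, assuming transformers is installed):

from transformers import ViTMAEConfig

config = ViTMAEConfig(mask_ratio=0.5)  # mask 50% of patches instead of the default 75%
print(config.hidden_size, config.norm_pix_loss)  # 768 False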
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = state_dict.pop(__lowercase )
_A = val
def a__ ( __lowercase ) -> List[str]:
_A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_A = value
else:
_A = value
return new_state_dict
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A = ""
if is_panoptic:
_A = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:256, :]
_A = in_proj_bias[:256]
_A = in_proj_weight[256:512, :]
_A = in_proj_bias[256:512]
_A = in_proj_weight[-256:, :]
_A = in_proj_bias[-256:]
def a__ ( ) -> int:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 |
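Stripped of model specifics, the conversion boils down to mapping old state-dict keys to new ones. A self-contained sketch of that renaming step (illustrative keys; the authoritative mapping is the rename_keys list above):

from collections import OrderedDict

def rename_state_dict(state_dict, rename_pairs):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        # use the first matching rename, otherwise keep the key as-is
        renamed[next((dst for src, dst in rename_pairs if src == key), key)] = value
    return renamed

sd = OrderedDict([("input_proj.weight", 0), ("untouched.bias", 1)])
out = rename_state_dict(sd, [("input_proj.weight", "input_projection.weight")])
assert list(out) == ["input_projection.weight", "untouched.bias"]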
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[a__]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 1 |
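The FileLock dance above is a general build-once, share-everywhere caching pattern for distributed training. A minimal self-contained sketch of the same pattern (hypothetical cache path and build function):

import os
import pickle
from filelock import FileLock

def load_or_build(cache_file, build_fn, overwrite=False):
    # Only the first process builds the cache; the rest block on the lock, then reuse it.
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file) and not overwrite:
            with open(cache_file, "rb") as f:
                return pickle.load(f)
        features = build_fn()
        with open(cache_file, "wb") as f:
            pickle.dump(features, f)
        return features

features = load_or_build("/tmp/demo_features.pkl", lambda: [n * n for n in range(100)])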
"""simple docstring"""
import os
import sys
import unittest
a_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
a_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
a_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class snake_case ( unittest.TestCase):
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
_A = get_test_to_tester_mapping(a__ )
_A = get_test_to_tester_mapping(a__ )
_A = {"BertModelTest": "BertModelTester"}
_A = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = get_model_to_test_mapping(a__ )
_A = get_model_to_test_mapping(a__ )
_A = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
_A = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = get_model_to_tester_mapping(a__ )
_A = get_model_to_tester_mapping(a__ )
_A = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
_A = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
self.assertEqual(get_test_info.to_json(a__ ) , a__ ) | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
# BFS: return True if an augmenting path from source to sink exists, recording parents along the way.
_A = [False] * len(__lowercase )
_A = []
queue.append(__lowercase )
_A = True
while queue:
_A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_A = True
_A = u
return visited[t]
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
# This array is filled by BFS to store the augmenting path found.
_A = [-1] * (len(__lowercase ))
_A = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_A = float("Inf" )
_A = sink
while s != source:
# Find the minimum residual capacity along the selected path
_A = min(__lowercase , graph[parent[s]][s] )
_A = parent[s]
max_flow += path_flow
_A = sink
while v != source:
_A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_A = parent[v]
return max_flow
a_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
a_ , a_ = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 1 |
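For reference, the sample network above is the textbook CLRS example and the script prints a maximum flow of 23. Because the graph has only six nodes, that value can be cross-checked by brute-forcing the minimum cut, which must equal the maximum flow by max-flow/min-cut duality (a small verification sketch):

from itertools import combinations

def min_cut_value(capacity, source, sink):
    n = len(capacity)
    inner = [v for v in range(n) if v not in (source, sink)]
    best = float("inf")
    for r in range(len(inner) + 1):
        for extra in combinations(inner, r):
            s_side = {source, *extra}
            # capacity crossing from the source side to the sink side
            cut = sum(capacity[u][v] for u in s_side for v in range(n) if v not in s_side)
            best = min(best, cut)
    return best

capacity = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert min_cut_value(capacity, 0, 5) == 23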
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : str = "▁" , a__ : bool = True , a__ : Union[str, AddedToken] = "<unk>" , a__ : Union[str, AddedToken] = "</s>" , a__ : Union[str, AddedToken] = "<pad>" , ) -> int:
'''simple docstring'''
_A = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
_A = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_A = token_dict["token"]
_A = Tokenizer(Unigram() )
_A = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
_A = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=a__ , add_prefix_space=a__ ),
pre_tokenizers.Digits(individual_digits=a__ ),
pre_tokenizers.Punctuation(),
] )
_A = decoders.Metaspace(replacement=a__ , add_prefix_space=a__ )
_A = TemplateProcessing(
single=F"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
_A = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(a__ , a__ )
def a_ ( self : Optional[Any] , a__ : Union[str, List[str]] , a__ : int = 80_00 , a__ : bool = True , ) -> List[Any]:
'''simple docstring'''
_A = trainers.UnigramTrainer(
vocab_size=a__ , special_tokens=self.special_tokens_list , show_progress=a__ , )
if isinstance(a__ , a__ ):
_A = [files]
self._tokenizer.train(a__ , trainer=a__ )
self.add_unk_id()
def a_ ( self : str , a__ : Union[Iterator[str], Iterator[Iterator[str]]] , a__ : int = 80_00 , a__ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
_A = trainers.UnigramTrainer(
vocab_size=a__ , special_tokens=self.special_tokens_list , show_progress=a__ , )
self._tokenizer.train_from_iterator(a__ , trainer=a__ )
self.add_unk_id()
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_A = json.loads(self._tokenizer.to_str() )
_A = self.special_tokens["unk"]["id"]
_A = Tokenizer.from_str(json.dumps(a__ ) ) | 621 |
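Under the hood this wrapper drives the tokenizers library directly; a minimal stand-alone sketch of training a Unigram model from an in-memory iterator (assuming only the tokenizers package, with toy data):

from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tokenizer = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
tokenizer.train_from_iterator(["hello world", "hello there"], trainer=trainer)
print(tokenizer.encode("hello world").tokens)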
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = state_dict.pop(__lowercase )
_A = val
def a__ ( __lowercase ) -> List[str]:
_A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_A = value
else:
_A = value
return new_state_dict
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A = ""
if is_panoptic:
_A = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:256, :]
_A = in_proj_bias[:256]
_A = in_proj_weight[256:512, :]
_A = in_proj_bias[256:512]
_A = in_proj_weight[-256:, :]
_A = in_proj_bias[-256:]
def a__ ( ) -> int:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> np.array:
_A = int(np.ceil((x_end - xa) / step_size ) )
_A = np.zeros((n + 1,) )
_A = ya
_A = xa
for k in range(__lowercase ):
_A = y[k] + step_size * ode_func(__lowercase , y[k] )
_A = y[k] + (
(step_size / 2) * (ode_func(__lowercase , y[k] ) + ode_func(x + step_size , __lowercase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 |
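The function above is Heun's method, a second-order predictor-corrector: an Euler step predicts the next value, then a trapezoidal average corrects it. A quick sanity check, re-implemented inline because the names in this dump are obfuscated: integrating y' = y over [0, 1] with y(0) = 1 should land very close to e:

import numpy as np

def heun(ode_func, y0, x0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        pred = y[k] + h * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (h / 2) * (ode_func(x, y[k]) + ode_func(x + h, pred))  # trapezoidal corrector
        x += h
    return y

approx = heun(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)[-1]
assert abs(approx - np.e) < 1e-5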
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 621 | 1 |
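The same randomized-pivot quicksort, restated compactly and exercised on fixed data (a self-contained demo; names are re-chosen here because the ones above are obfuscated):

import random

def quicksort(a, lo, hi):
    # sorts a[lo:hi] in place; pivot chosen uniformly at random, Lomuto partition
    if hi - lo > 1:
        pivot_idx = random.randint(lo, hi - 1)
        a[lo], a[pivot_idx] = a[pivot_idx], a[lo]
        pivot, i = a[lo], lo + 1
        for j in range(lo + 1, hi):
            if a[j] < pivot:
                a[i], a[j] = a[j], a[i]
                i += 1
        a[lo], a[i - 1] = a[i - 1], a[lo]
        quicksort(a, lo, i - 1)
        quicksort(a, i, hi)

data = [5, 1, 4, 2, 3, 2]
quicksort(data, 0, len(data))
assert data == [1, 2, 2, 3, 4, 5]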
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = torch.nn.Linear(10 , 10 )
_A = torch.optim.SGD(model.parameters() , 0.1 )
_A = Accelerator()
_A = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state() | 621 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 1 |
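For a single example without padding, the zero_mean_unit_var_norm step above reduces to ordinary standardization; a tiny NumPy check of that base case:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(float(normed.mean())) < 1e-6
assert abs(float(normed.std()) - 1.0) < 1e-3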
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 |
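_LazyModule defers the heavy torch imports until a symbol is actually touched. The same effect can be sketched with a module-level __getattr__ (PEP 562); this stand-alone version assumes it lives in the same package as the submodule it names:

import importlib

_LAZY = {"TrajectoryTransformerConfig": ".configuration_trajectory_transformer"}

def __getattr__(name):
    # imported lazily, on first attribute access
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")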
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 1 |
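Two quick worked cases of the merge-and-pick-middle rule above (restated inline since the helper's name is obfuscated): an odd-length merge takes the middle element, an even-length merge averages the middle two:

merged = sorted([1, 3] + [2])               # [1, 2, 3]
assert merged[len(merged) // 2] == 2        # odd length: middle element
merged = sorted([1, 2] + [3, 4])            # [1, 2, 3, 4]
assert (merged[1] + merged[2]) / 2 == 2.5   # even length: mean of the two middle elements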
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def a__ ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 621 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 1 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : int , a__ : str=None , a__ : Dict=None ) -> int:
'''simple docstring'''
_A = data
_A = previous
_A = next_node
def __str__( self : Any ) -> str:
'''simple docstring'''
return F"""{self.data}"""
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.data
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.next
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.previous
class snake_case :
def __init__( self : Tuple , a__ : Any ) -> int:
'''simple docstring'''
_A = head
def __iter__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
_A = self.current.get_data()
_A = self.current.get_next()
return value
class snake_case :
def __init__( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_A = None # First node in list
_A = None # Last node in list
def __str__( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_A = self.head
_A = []
while current is not None:
nodes.append(current.get_data() )
_A = current.get_next()
return " ".join(str(a__ ) for node in nodes )
def __contains__( self : int , a__ : int ) -> Any:
'''simple docstring'''
_A = self.head
while current:
if current.get_data() == value:
return True
_A = current.get_next()
return False
def __iter__( self : str ) -> Tuple:
'''simple docstring'''
return LinkedListIterator(self.head )
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def a_ ( self : Any , a__ : Node ) -> None:
'''simple docstring'''
if self.head is None:
_A = node
_A = node
else:
self.insert_before_node(self.head , a__ )
def a_ ( self : Tuple , a__ : Node ) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(a__ )
else:
self.insert_after_node(self.tail , a__ )
def a_ ( self : Union[str, Any] , a__ : int ) -> None:
'''simple docstring'''
_A = Node(a__ )
if self.head is None:
self.set_head(a__ )
else:
self.set_tail(a__ )
def a_ ( self : Dict , a__ : Node , a__ : Node ) -> None:
'''simple docstring'''
_A = node
_A = node.previous
if node.get_previous() is None:
_A = node_to_insert
else:
_A = node_to_insert
_A = node_to_insert
def a_ ( self : Optional[Any] , a__ : Node , a__ : Node ) -> None:
'''simple docstring'''
_A = node
_A = node.next
if node.get_next() is None:
_A = node_to_insert
else:
_A = node_to_insert
_A = node_to_insert
def a_ ( self : int , a__ : int , a__ : int ) -> None:
'''simple docstring'''
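# Walk the list to the requested position and insert there; if the position is past the end, append at the tail.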
_A = 1
_A = Node(a__ )
_A = self.head
while node:
if current_position == position:
self.insert_before_node(a__ , a__ )
return
current_position += 1
_A = node.next
self.insert_after_node(self.tail , a__ )
def a_ ( self : str , a__ : int ) -> Node:
'''simple docstring'''
_A = self.head
while node:
if node.get_data() == item:
return node
_A = node.get_next()
raise Exception("Node not found" )
def a_ ( self : Optional[Any] , a__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if (node := self.get_node(a__ )) is not None:
if node == self.head:
_A = self.head.get_next()
if node == self.tail:
_A = self.tail.get_previous()
self.remove_node_pointers(a__ )
@staticmethod
def a_ ( a__ : Node ) -> None:
'''simple docstring'''
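# Bridge the removed node's neighbours together, then clear its own links.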
if node.get_next():
_A = node.previous
if node.get_previous():
_A = node.next
_A = None
_A = None
def a_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return self.head is None
def a__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase):
def a_ ( self : str ) -> str:
'''simple docstring'''
_A = inspect.getfile(accelerate.test_utils )
_A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_A = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
_A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_A = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a__ , env=os.environ.copy() )
@require_multi_gpu
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_A = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a__ , env=os.environ.copy() )
@require_multi_gpu
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a__ , env=os.environ.copy() )
@require_multi_gpu
def a_ ( self : str ) -> Dict:
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_A = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(a__ , env=os.environ.copy() )
if __name__ == "__main__":
a_ = Accelerator()
a_ = (accelerator.state.process_index + 2, 10)
a_ = torch.randint(0, 10, shape).to(accelerator.device)
a_ = ""
a_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 621 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
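# Tiny UNet, PNDM scheduler, VAE and CLIP text encoder so the pipeline test runs quickly on CPU.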
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
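# Random init image resized to 64x64; a brightened copy of it serves as the inpainting mask.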
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 1 |
"""simple docstring"""
from functools import lru_cache
def a__ ( __lowercase ) -> set:
_A = 2
_A = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__lowercase )
if n > 1:
factors.add(__lowercase )
return factors
@lru_cache
def a__ ( __lowercase ) -> int:
return len(unique_prime_factors(__lowercase ) )
def a__ ( __lowercase ) -> bool:
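# All values are equal when the set of them has at most one element.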
return len(set(__lowercase ) ) in (0, 1)
def a__ ( __lowercase ) -> list:
_A = 2
while True:
# Increment each value of a generated range
_A = [base + i for i in range(__lowercase )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
_A = [upf_len(__lowercase ) for x in group]
checker.append(__lowercase )
# If all numbers in the list are equal, return the group variable.
if equality(__lowercase ):
return group
# Increment our base variable by 1
base += 1
def a__ ( __lowercase = 4 ) -> int:
_A = run(__lowercase )
return results[0] if len(__lowercase ) else None
if __name__ == "__main__":
print(solution()) | 621 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
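# Euclidean algorithm for the greatest common divisor.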
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
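# Modular inverse via the extended Euclidean algorithm; it exists only when gcd(a, m) == 1.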
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
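# Build a dataset (map-style or iterable) of the requested size and prepare its DataLoader with the accelerator.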
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
if isinstance(sources , int ):
_A = [sources]
if isinstance(sinks , int ):
_A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
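# Discharge the vertex: push excess flow to admissible neighbours, relabelling whenever no push is possible.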
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = "src/transformers"
a_ = "docs/source/en/tasks"
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
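# Return the text between the start and end prompts, together with its line span and the full file contents.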
with open(__lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_A = f.readlines()
# Find the start prompt.
_A = 0
while not lines[start_index].startswith(__lowercase ):
start_index += 1
start_index += 1
_A = start_index
while not lines[end_index].startswith(__lowercase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def a__ ( __lowercase ) -> Dict:
_A = TASK_GUIDE_TO_MODELS[task_guide]
_A = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__lowercase , set() )
_A = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A , _A , _A , _A = _find_text_in_file(
filename=os.path.join(__lowercase , __lowercase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
_A = get_model_list_for_task(__lowercase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__lowercase , __lowercase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 621 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 1 |
"""simple docstring"""
from math import pi, sqrt, tan
def a__ ( __lowercase ) -> float:
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def a__ ( __lowercase , __lowercase , __lowercase ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def a__ ( __lowercase ) -> float:
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def a__ ( __lowercase ) -> float:
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def a__ ( __lowercase , __lowercase ) -> float:
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def a__ ( __lowercase , __lowercase , __lowercase ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
_A = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def a__ ( __lowercase , __lowercase ) -> float:
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def a__ ( __lowercase , __lowercase ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(__lowercase , 2 ) * torus_radius * tube_radius
def a__ ( __lowercase , __lowercase ) -> float:
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def a__ ( __lowercase ) -> float:
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def a__ ( __lowercase , __lowercase ) -> float:
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def a__ ( __lowercase , __lowercase , __lowercase ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
_A = (sidea + sidea + sidea) / 2
_A = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def a__ ( __lowercase , __lowercase ) -> float:
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def a__ ( __lowercase , __lowercase , __lowercase ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def a__ ( __lowercase ) -> float:
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def a__ ( __lowercase , __lowercase ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def a__ ( __lowercase , __lowercase ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def a__ ( __lowercase , __lowercase ) -> float:
if not isinstance(__lowercase , __lowercase ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''') | 621 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 1 |