| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
def excel_title_to_column(column_title: str) -> int:
    """
    Given the title of a column as it appears in an Excel sheet, return its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
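A quick non-doctest check of the conversion (the function name `excel_title_to_column` is reconstructed from the algorithm; expected values shown as comments):

for title in ("A", "Z", "AA", "AZ", "ZZ", "FXSHRXW"):
    print(title, "->", excel_title_to_column(title))
# A -> 1, Z -> 26, AA -> 27, AZ -> 52, ZZ -> 702, FXSHRXW -> 2147483647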
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares masked time indices for pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer for Wav2Vec2-like pretraining. Trainer is subclassed to support gumbel temperature decay.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()


if __name__ == "__main__":
    main()
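A minimal smoke test for the collator above, useful before launching a full run. This is a sketch under stated assumptions: it uses a randomly initialized model, synthetic audio, and explicitly asks the feature extractor for an attention mask, which the collator requires; API details may differ across transformers versions.

import numpy as np

config = Wav2Vec2Config(do_stable_layer_norm=True, feat_extract_norm="layer")
model = Wav2Vec2ForPreTraining(config)
feature_extractor = Wav2Vec2FeatureExtractor(do_normalize=True, return_attention_mask=True)
collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

features = [{"input_values": np.random.randn(16_000)}, {"input_values": np.random.randn(12_000)}]
batch = collator(features)
print(batch["input_values"].shape)       # (2, 16000) after padding to the longest sample
print(batch["mask_time_indices"].shape)  # (2, number of feature-extractor output frames)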
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2] and swap them if they are out of
    order for the given direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence so that it is sorted in the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic sort of array[low : low + length]; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100)
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
    ] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "Current Price of the product",
    ] = " "
    data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
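Illustrative interactive call (live scraping depends on Amazon's current markup and rate limiting, so results are not guaranteed):

df = get_amazon_product_data("laptop")
print(df.head())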
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> new_list.get_head_data() is None
    True
    >>> new_list.insert(10)
    >>> new_list.get_head_data()
    10
    >>> new_list.get_tail_data()
    10
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
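A short walkthrough of the list API above (illustrative values; expected output in comments):

linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)
linked_list.insert_at_position(position=2, value=9)
print(linked_list)        # 1 9 2 3
print(2 in linked_list)   # True
linked_list.delete_value(9)
print(linked_list.get_head_data(), linked_list.get_tail_data())  # 1 3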
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Export a pytorch BertModel as a TF 1.x checkpoint.

    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
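Because main() accepts an explicit argument list, the converter can be driven from Python as well as from the shell; the paths below are placeholders:

main([
    "--model_name", "bert-base-uncased",
    "--pytorch_model_path", "/path/to/pytorch_model.bin",
    "--tf_cache_dir", "/path/to/tf_checkpoints",
])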
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
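With the lazy module registered, imports look the same as for any eager module and the tokenizer is only materialized on first use (checkpoint name shown for illustration):

from transformers import BertweetTokenizer

tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base")
print(tokenizer.tokenize("We love NLP"))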
class Graph:
    """
    Data structure to store graphs (based on adjacency lists).
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        """
        For Boruvka's algorithm the edge weights should be distinct; converts
        the weights so that they are.
        """
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")
    def get_edges(self):
        """Returns all edges in the graph."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        """
        Builds a graph from the given set of vertices and edges.
        """
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        """
        Disjoint-set union/find, used by Boruvka's algorithm.
        """

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """
        Implementation of Boruvka's algorithm; returns the minimum spanning
        tree of the given graph (assumes distinct edge weights).
        """
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
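An illustrative run of Boruvka's algorithm on a small graph; distinct_weight() is called first because the algorithm assumes distinct edge weights:

g = Graph.build(vertices=[0, 1, 2, 3], edges=[[0, 1, 1], [0, 2, 1], [2, 3, 1]])
g.distinct_weight()
mst = Graph.boruvka_mst(g)
print(mst)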
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Return the root-mean-square speed (m/s) of a molecule of an ideal gas,
    given the temperature in kelvin and the molar mass in kg/mol.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: the molar mass of nitrogen (N2) is 0.028 kg/mol
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
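The function evaluates v_rms = sqrt(3RT/M). As a cross-check, oxygen (M = 0.032 kg/mol) at the same 300 K comes out slower than nitrogen, as expected for the heavier molecule:

print(rms_speed_of_molecule(300, 0.032))  # ≈ 483.6 m/s for O2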
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
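A small check of the property above: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), one output frame corresponds to 5 * 2**6 = 320 input samples, i.e. 20 ms of audio at 16 kHz:

config = Wav2Vec2Config()
print(config.inputs_to_logits_ratio)  # 320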
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
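# Example invocation (paths are placeholders and the script filename is assumed):
#   python convert_poolformer_original_to_pytorch.py --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12_hf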
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt=None, choices=None):
        '''simple docstring'''
        self.position = 0
        self.choices = choices if choices is not None else []
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
    def print_choice(self, index):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction, num_spaces=1):
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        '''simple docstring'''
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice=0):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
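# Minimal usage sketch (assumes an interactive terminal; under Colab, run() falls back to
# the plain numeric input handled above):
#   menu = BulletMenu("Which device?", ["cpu", "cuda", "mps"])
#   selected_index = menu.run(default_choice=0)  # returns the chosen index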
| 300
| 1
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_snake_case = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
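# With this pattern, importing the package only records _import_structure; the submodules
# above are imported lazily by _LazyModule on first attribute access (e.g. the first
# lookup of `BeitConfig` triggers the real import of configuration_beit).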
| 300
| 1
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
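# Sketch of the result, assuming a fairseq dict file whose lines look like "token count":
# the four specials take ids 0-3 and the file's tokens follow in order, e.g.
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "a": 5, ...}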
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 300
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self):
        '''simple docstring'''
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self):
'''simple docstring'''
return str(self)
    def validate_indices(self, loc):
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc):
        '''simple docstring'''
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc, value):
        '''simple docstring'''
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self):
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another):
        '''simple docstring'''
        return self + (-another)
    def __mul__(self, another):
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u, v):
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
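# sherman_morrison above applies the Sherman-Morrison identity, with `self` holding A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# The zero check on the denominator guards the case where the rank-one update makes
# A + u v^T singular.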
# Testing
if __name__ == "__main__":
    def test1() -> None:
        '''simple docstring'''
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2() -> None:
        '''simple docstring'''
        import doctest
        doctest.testmod()
    test2()
| 300
| 1
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
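# Horner's rule nests the polynomial as (((a_n*x + a_{n-1})*x + ...)*x + a_0), using one
# multiply-add per coefficient instead of recomputing each power of x. For the sample
# inputs below, both evaluators return 79800.0 (= 5.0*10^2 + 9.3*10^3 + 7.0*10^4).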
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
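# Note: unlike the CSV/JSON builders, this builder reads each data file with pd.read_pickle,
# so the files are expected to be pickled pandas DataFrames; each file becomes one Arrow
# table, cast to self.config.features when features are provided.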
| 300
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
            '''simple docstring'''
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            '''simple docstring'''
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            '''simple docstring'''
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            '''simple docstring'''
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300
| 1
|
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : List[str] = kwargs.pop("question_encoder")
_lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type")
_lowerCAmelCase : int = kwargs.pop("generator")
_lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : int = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : List[Any] = reduce_loss
_lowerCAmelCase : Any = label_smoothing
_lowerCAmelCase : Optional[int] = exclude_bos_score
_lowerCAmelCase : Optional[Any] = do_marginalize
_lowerCAmelCase : Any = title_sep
_lowerCAmelCase : Any = doc_sep
_lowerCAmelCase : Optional[int] = n_docs
_lowerCAmelCase : Optional[Any] = max_combined_length
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : List[str] = dataset_split
_lowerCAmelCase : Optional[Any] = index_name
_lowerCAmelCase : Dict = retrieval_vector_size
_lowerCAmelCase : Union[str, Any] = retrieval_batch_size
_lowerCAmelCase : Optional[int] = passages_path
_lowerCAmelCase : Dict = index_path
_lowerCAmelCase : Tuple = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : str = do_deduplication
_lowerCAmelCase : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
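# Minimal composition sketch (checkpoint names are illustrative of typical RAG components):
#   from transformers import AutoConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#       AutoConfig.from_pretrained("facebook/bart-large"),
#       n_docs=5,
#   )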
| 300
| 1
|
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
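# Once the Ford-Fulkerson loop above can find no augmenting path, an edge (i, j) whose
# residual capacity graph[i][j] dropped to 0 while its original capacity temp[i][j] was
# positive is saturated and crosses the minimum s-t cut; the final double loop collects
# exactly those edges.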
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
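# Sketch of a concrete subclass (names are illustrative; `parser` is assumed to be the
# argparse sub-parsers action handed in by the CLI entry point):
#   class PingCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("ping")
#           sub.set_defaults(func=lambda args: PingCommand())
#       def run(self):
#           print("pong")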
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
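# Illustrative entries appended above (assuming the standard timm ViT layout), e.g.
#   ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
# and with base_model=True the "vit." prefix is stripped from the targets,
# e.g. ("norm.weight", "layernorm.weight").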
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
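# Example invocation (hypothetical script filename; the flag names match the
# argparse definitions above):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224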
| 300
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A ( _lowerCamelCase ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A ( ):
'''simple docstring'''
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
_lowerCAmelCase : Optional[int] = [1, 2, 3]
with pytest.raises(_lowerCamelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=2 )
with pytest.raises(_lowerCamelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [1, 2]
_lowerCAmelCase : Tuple = {"a": 1, "b": 2}
_lowerCAmelCase : Dict = {"a": [1, 2], "b": [3, 4]}
_lowerCAmelCase : Optional[int] = {"a": {"1": 1}, "b": 2}
_lowerCAmelCase : List[Any] = {"a": 1, "b": 2, "c": 3, "d": 4}
_lowerCAmelCase : List[Any] = [2, 3]
_lowerCAmelCase : Any = {"a": 2, "b": 3}
_lowerCAmelCase : Any = {"a": [2, 3], "b": [4, 5]}
_lowerCAmelCase : List[Any] = {"a": {"1": 2}, "b": 3}
_lowerCAmelCase : List[str] = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
assert map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) == expected_map_nested_sa
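# Usage sketch of the API exercised above (requires pyspark and joblibspark, per
# the decorators): inside the context manager, map_nested dispatches work to the
# registered Spark backend instead of local multiprocessing.
#   from datasets.parallel import parallel_backend
#   from datasets.utils.py_utils import map_nested
#
#   def add_one(i):  # module-level, hence picklable, like the helper at the top of this file
#       return i + 1
#
#   with parallel_backend("spark"):
#       map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)  # -> {"a": [2, 3], "b": [4, 5]}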
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
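# Migration sketch for the deprecation above: instantiate the replacement class
# directly; this shim merely forwards all arguments to it unchanged.
#   from transformers import VideoMAEImageProcessor
#   image_processor = VideoMAEImageProcessor()  # instead of VideoMAEFeatureExtractor()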
| 300
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=32, __a=3, __a=4, __a=[10, 20, 30, 40], __a=[2, 2, 3, 2], __a=True, __a=True, __a=37, __a="gelu", __a=10, __a=0.02, __a=["stage2", "stage3", "stage4"], __a=[2, 3, 4], __a=None, ):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Optional[int] = num_stages
_lowerCAmelCase : Dict = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = out_features
_lowerCAmelCase : Optional[Any] = out_indices
_lowerCAmelCase : Tuple = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size], self.num_labels)
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=__a, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ConvNextModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = model(__a)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ConvNextForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[Any] = model(__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = ConvNextBackbone(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Any = model(__a)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
# verify backbone works with out_features=None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Any = ConvNextBackbone(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : str = model(__a)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ConvNextModelTester(self)
_lowerCAmelCase : Tuple = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self):
'''simple docstring'''
return
@unittest.skip(reason="ConvNext does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__a)
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a)
def snake_case__ ( self):
'''simple docstring'''
def check_hidden_states_output(__a, __a, __a):
_lowerCAmelCase : List[str] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**self._prepare_for_class(__a, __a))
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__a), expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = True
check_hidden_states_output(__a, __a, __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : str = True
check_hidden_states_output(__a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = ConvNextModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(__a)
_lowerCAmelCase : Optional[Any] = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[Any] = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**__a)
# verify the logits
_lowerCAmelCase : List[Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : Union[str, Any] = torch.tensor([-0.0_260, -0.4_739, 0.1_911]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3], __a, atol=1E-4))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , a):
lowerCamelCase__ = (ConvNextBackbone,) if is_torch_available() else ()
lowerCamelCase__ = ConvNextConfig
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ConvNextModelTester(self)
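# Inference sketch mirroring the slow integration test above (same checkpoint id;
# the image path is a placeholder):
#   import torch
#   from PIL import Image
#   from transformers import AutoImageProcessor, ConvNextForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test
#   print(model.config.id2label[logits.argmax(-1).item()])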
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
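# Usage sketch distilled from the test above (GPU + onnxruntime required; model
# id, revision, and call arguments are taken verbatim from the test body):
#   pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CUDAExecutionProvider")
#   output = pipe(prompt="A red cat sitting on a park bench", image=init_image,
#                 mask_image=mask_image, strength=0.75, guidance_scale=7.5,
#                 num_inference_steps=15, output_type="np")
#   output.images[0]  # (512, 512, 3) float ndarray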
| 300
| 1
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_snake_case = logging.getLogger(__name__)
_snake_case = 50 # max width of layer names
_snake_case = 70 # max width of quantizer names
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=_lowerCamelCase , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=_lowerCamelCase , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=_lowerCamelCase , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=_lowerCamelCase , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=_lowerCamelCase , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=_lowerCamelCase , type=_lowerCamelCase , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=_lowerCamelCase , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def A ( _lowerCamelCase ):
'''simple docstring'''
if args.calibrator == "max":
_lowerCAmelCase : Any = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
_lowerCAmelCase : Tuple = "histogram"
elif args.calibrator == "mse":
_lowerCAmelCase : str = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
_lowerCAmelCase : int = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCamelCase )
_lowerCAmelCase : List[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
'''simple docstring'''
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_lowerCamelCase , ["embeddings"] , which="weight" , _disabled=_lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(_lowerCamelCase , [""] , _disabled=_lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_lowerCamelCase , args.quant_disable_keyword , _disabled=_lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=_lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=_lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(_lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(_lowerCamelCase , _lowerCamelCase )
if args.clip_gelu:
clip_gelu(_lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def fusea(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(_lowerCamelCase , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
_lowerCAmelCase : Optional[int] = qq._amax.detach().item()
_lowerCAmelCase : int = qk._amax.detach().item()
_lowerCAmelCase : Optional[Any] = qv._amax.detach().item()
_lowerCAmelCase : Tuple = max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
qq._amax.fill_(_lowerCamelCase )
qk._amax.fill_(_lowerCamelCase )
qv._amax.fill_(_lowerCamelCase )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
_lowerCAmelCase : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCamelCase )
_lowerCAmelCase : str = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def A ( _lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
_lowerCAmelCase : Dict = mod.weight.shape[0]
_lowerCAmelCase : Any = mod._weight_quantizer._amax.detach()
_lowerCAmelCase : List[Any] = torch.ones(_lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def A ( _lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , "_weight_quantizer" ):
if not hasattr(mod._weight_quantizer , "_amax" ):
print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_lowerCAmelCase : Any = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_lowerCAmelCase : Union[str, Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
_lowerCAmelCase : List[Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCamelCase , keepdims=_lowerCamelCase ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
_lowerCAmelCase : Tuple = amax
def A ( _lowerCamelCase , _lowerCamelCase=25 , _lowerCamelCase=180 , _lowerCamelCase=None ):
'''simple docstring'''
if ignore is None:
_lowerCAmelCase : Tuple = []
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = [ignore]
_lowerCAmelCase : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(_lowerCamelCase , "weight" ):
continue
_lowerCAmelCase : Any = max(_lowerCamelCase , len(_lowerCamelCase ) )
for name, mod in model.named_modules():
_lowerCAmelCase : Tuple = getattr(_lowerCamelCase , "_input_quantizer" , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = getattr(_lowerCamelCase , "_weight_quantizer" , _lowerCamelCase )
if not hasattr(_lowerCamelCase , "weight" ):
continue
if type(_lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(_lowerCamelCase ) is str and s in name]:
continue
_lowerCAmelCase : Any = F"Act:{input_q.extra_repr()}"
_lowerCAmelCase : List[Any] = F"Wgt:{weight_q.extra_repr()}"
_lowerCAmelCase : Any = F"{name:{name_width}} {act_str} {wgt_str}"
if len(_lowerCamelCase ) <= line_width:
logger.info(_lowerCamelCase )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
for name, mod in model.named_modules():
if isinstance(_lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(_lowerCamelCase , _lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
logger.warning(F"{name} has no {quantizer}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="both" , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , "_input_quantizer" , _lowerCamelCase , _lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , "_weight_quantizer" , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , "_input_quantizer" ) or hasattr(_lowerCamelCase , "_weight_quantizer" ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
set_quantizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
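# Calibration flow sketch using only the pytorch_quantization calls that appear
# in the loops above (the trainer wiring around the forward passes is assumed):
#   for name, module in model.named_modules():
#       if name.endswith("_quantizer") and module._calibrator is not None:
#           module.disable_quant()
#           module.enable_calib()      # collect activation statistics
#   # ... run a few forward passes on calibration data ...
#   for name, module in model.named_modules():
#       if name.endswith("_quantizer") and module._calibrator is not None:
#           module.load_calib_amax()   # or load_calib_amax("percentile", percentile=p)
#           module.enable_quant()
#           module.disable_calib()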
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
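# End-to-end sketch of the processor contract verified above:
#   processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="lower newer", images=image)
#   list(inputs.keys())    # ["input_ids", "attention_mask", "pixel_values"]
#   prompted = processor(images=image, visual_prompt=prompt_image)
#   list(prompted.keys())  # ["pixel_values", "conditional_pixel_values"]
#   processor()            # raises: at least one input is required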
| 300
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : List[str] = use_mask_token
_lowerCAmelCase : Union[str, Any] = use_absolute_position_embeddings
_lowerCAmelCase : int = use_relative_position_bias
_lowerCAmelCase : List[Any] = use_shared_relative_position_bias
_lowerCAmelCase : Optional[Any] = layer_scale_init_value
_lowerCAmelCase : Any = drop_path_rate
_lowerCAmelCase : str = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Any = out_indices
_lowerCAmelCase : List[str] = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Union[str, Any] = auxiliary_loss_weight
_lowerCAmelCase : Dict = auxiliary_channels
_lowerCAmelCase : List[Any] = auxiliary_num_convs
_lowerCAmelCase : Optional[Any] = auxiliary_concat_input
_lowerCAmelCase : Optional[int] = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
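# Construction sketch: Data2VecVisionConfig is the public name for the config
# class above (model_type "data2vec-vision"); every keyword below is a default
# defined in __init__.
#   from transformers import Data2VecVisionConfig
#   config = Data2VecVisionConfig(image_size=224, patch_size=16, use_auxiliary_head=True)
#   (config.hidden_size, config.num_hidden_layers)  # (768, 12)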
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["politics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
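# Usage sketch matching the slow tests above (same model id and labels):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   classifier("Who are you voting for in 2020?",
#              candidate_labels=["politics", "public health", "science"])
#   # -> {"sequence": ..., "labels": [...], "scores": [...]}, scores summing to 1.0;
#   # pass multi_label=True to score each label independently instead.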
| 300
| 1
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_snake_case = logging.get_logger(__name__)
def A ( tensor ):
'''simple docstring'''
if isinstance(tensor , np.ndarray ):
return list(tensor.shape )
dynamic = tf.shape(tensor )
if tensor.shape == tf.TensorShape(None ):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def A ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=_lowerCamelCase , name=_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-5 , _lowerCamelCase=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
_lowerCAmelCase , _lowerCAmelCase : Tuple = tf.nn.moments(_lowerCamelCase , axes=[axis] , keepdims=_lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_lowerCAmelCase : Union[str, Any] = [1] * inputs.shape.rank
_lowerCAmelCase : Optional[Any] = shape_list(_lowerCamelCase )[axis]
_lowerCAmelCase : int = tf.reshape(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = tf.reshape(_lowerCamelCase , _lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
_lowerCAmelCase : Dict = tf.nn.batch_normalization(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , offset=_lowerCamelCase , scale=_lowerCamelCase , variance_epsilon=_lowerCamelCase , )
return outputs
def A ( _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_lowerCAmelCase : Dict = tf.shape(_lowerCamelCase )
_lowerCAmelCase : Any = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_lowerCAmelCase : Tuple = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , tf.Tensor ):
_lowerCAmelCase : Dict = tf.convert_to_tensor(_lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_lowerCAmelCase : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_lowerCAmelCase : Optional[int] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_lowerCAmelCase : List[Any] = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
_lowerCamelCase , tf.cast(_lowerCamelCase , dtype=tensor.dtype ) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(_lowerCamelCase )}) must be smaller than the embedding "
F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_lowerCAmelCase : Union[str, Any] = [x for x in data if len(_lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}" )
_lowerCAmelCase : Union[str, Any] = np.asarray(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Optional[int] = np.array_split(_lowerCamelCase , _lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_lowerCAmelCase : str = np.array_split(_lowerCamelCase , _lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = chunk_data
else:
_lowerCAmelCase : Dict = data
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if name in group.attrs:
_lowerCAmelCase : Dict = [n.decode("utf8" ) if hasattr(_lowerCamelCase , "decode" ) else n for n in group.attrs[name]]
else:
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : List[Any] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(_lowerCamelCase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def A ( _lowerCamelCase ):
'''simple docstring'''
def _expand_single_ad_tensor(t ):  # parameter named so the body below references a defined name
if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(t , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _lowerCamelCase )
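# Worked example for the flatten-style helper above (its start_dim/end_dim
# semantics mirror torch.flatten): collapsing dims 1..2 of a (2, 3, 4, 5) tensor
# multiplies them together, giving shape (2, 12, 5); start_dim == end_dim is a no-op.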
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be on the same device, so we use the CPU, as is the default with CUDA
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
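# Standalone worked example of the geometric sigma schedule built in the
# set_sigmas method above: sigmas interpolate between sigma_min and sigma_max
# in log space via torch.exp(torch.linspace(log(min), log(max), N)).
import math
import torch

sigma_min, sigma_max, num_steps = 0.01, 1348.0, 5
discrete_sigmas = torch.exp(
    torch.linspace(math.log(sigma_min), math.log(sigma_max), num_steps))
print(discrete_sigmas)  # ≈ [0.01, 0.19, 3.67, 70.35, 1348.0]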
| 300
| 1
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_snake_case = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_snake_case = "main"
# Default branch name
_snake_case = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
_snake_case = "aaaaaaa"
# This commit does not exist, so we should 404.
_snake_case = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
_snake_case = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def A ( ):
'''simple docstring'''
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def A ( ):
'''simple docstring'''
print("Bonjour!" )
yield
print("Au revoir!" )
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers") is not None
class UpperCAmelCase_ ( unittest.TestCase):
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def snake_case__ ( self, __a):
'''simple docstring'''
with ContextManagers([]):
print("Transformers are awesome!")
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def snake_case__ ( self, __a):
'''simple docstring'''
with ContextManagers([context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def snake_case__ ( self, __a):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
@require_torch
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(find_labels(__a), ["labels"])
self.assertEqual(find_labels(__a), ["labels", "next_sentence_label"])
self.assertEqual(find_labels(__a), ["start_positions", "end_positions"])
class UpperCAmelCase_ ( a):
pass
self.assertEqual(find_labels(__a), ["labels"])
@require_tf
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(find_labels(__a), ["labels"])
self.assertEqual(find_labels(__a), ["labels", "next_sentence_label"])
self.assertEqual(find_labels(__a), ["start_positions", "end_positions"])
class UpperCAmelCase_ ( a):
pass
self.assertEqual(find_labels(__a), ["labels"])
@require_flax
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(find_labels(__a), [])
self.assertEqual(find_labels(__a), [])
self.assertEqual(find_labels(__a), [])
class UpperCAmelCase_ ( a):
pass
self.assertEqual(find_labels(__a), [])
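# A simplified reimplementation sketch (assumption: not the library's exact
# code) of what the tests above exercise: find_labels inspects a model class's
# forward/call signature and returns the label-like argument names.
import inspect

def find_labels_sketch(model_class):
    # Prefer torch's forward, fall back to TF's call, then plain __call__.
    method = getattr(model_class, "forward", None) or getattr(model_class, "call", None) or model_class.__call__
    params = inspect.signature(method).parameters
    return [p for p in params if "label" in p or p in ("start_positions", "end_positions")]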
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
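# Standalone sanity check (illustrative) of the strength rules enforced above:
# minimum length plus at least one character from each class.
from string import ascii_lowercase as _lc, ascii_uppercase as _uc, digits as _d, punctuation as _p

def _is_strong(password, min_length=8):
    return (
        len(password) >= min_length
        and any(c in _uc for c in password)
        and any(c in _lc for c in password)
        and any(c in _d for c in password)
        and any(c in _p for c in password)
    )

assert _is_strong("Aa1!aaaa")
assert not _is_strong("weakpass")  # no uppercase, digit, or punctuation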
| 300
| 1
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : Tuple = flatten_dict(_lowerCamelCase )
return flax_params
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : Optional[Any] = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
_lowerCAmelCase : str = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_lowerCAmelCase : Optional[int] = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_lowerCAmelCase : Any = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_lowerCAmelCase : List[str] = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_lowerCAmelCase : int = re.sub(r"layers_(\d+)" , r"layer.\1" , _lowerCamelCase )
_lowerCAmelCase : str = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_lowerCAmelCase : List[Any] = re.sub(r"layers_(\d+)" , r"layer.\1" , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = flax_dict[key]
_lowerCAmelCase : List[str] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_lowerCAmelCase : Tuple = torch.from_numpy(converted_dict[key].T )
else:
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : str = get_flax_param(_lowerCamelCase )
if not use_large:
_lowerCAmelCase : List[Any] = PixaStructVisionConfig()
_lowerCAmelCase : Tuple = PixaStructTextConfig()
else:
_lowerCAmelCase : Any = PixaStructVisionConfig(
hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
_lowerCAmelCase : Dict = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
_lowerCAmelCase : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = PixaStructForConditionalGeneration(_lowerCamelCase )
_lowerCAmelCase : List[str] = rename_and_convert_flax_params(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
_lowerCAmelCase : int = PixaStructImageProcessor()
_lowerCAmelCase : str = PixaStructProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
if use_large:
_lowerCAmelCase : Any = 4_096
_lowerCAmelCase : Optional[int] = True
# mkdir if needed
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
print("Model saved in {}".format(_lowerCamelCase ) )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_snake_case = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
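# Quick check (standalone) of the layer-renaming regex used in the key
# conversion above: "layers_<n>" becomes "layer.<n>".
import re as _re

assert _re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.mlp") == "encoder.layer.3.mlp"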
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'unispeech'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="mean", __a=False, __a=False, __a=256, __a=80, __a=0, __a=1, __a=2, __a=0.5, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : int = feat_extract_norm
_lowerCAmelCase : Tuple = feat_extract_activation
_lowerCAmelCase : str = list(__a)
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : Tuple = conv_bias
_lowerCAmelCase : Optional[int] = num_conv_pos_embeddings
_lowerCAmelCase : int = num_conv_pos_embedding_groups
_lowerCAmelCase : Union[str, Any] = len(self.conv_dim)
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = hidden_dropout
_lowerCAmelCase : Dict = attention_dropout
_lowerCAmelCase : Any = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : str = final_dropout
_lowerCAmelCase : List[str] = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : int = num_ctc_classes
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Dict = do_stable_layer_norm
_lowerCAmelCase : str = use_weighted_layer_sum
_lowerCAmelCase : int = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : Dict = mask_time_prob
_lowerCAmelCase : Dict = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : Any = mask_feature_prob
_lowerCAmelCase : Optional[int] = mask_feature_length
_lowerCAmelCase : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : Any = num_codevectors_per_group
_lowerCAmelCase : List[Any] = num_codevector_groups
_lowerCAmelCase : Dict = contrastive_logits_temperature
_lowerCAmelCase : Union[str, Any] = feat_quantizer_dropout
_lowerCAmelCase : Union[str, Any] = num_negatives
_lowerCAmelCase : Any = codevector_dim
_lowerCAmelCase : List[Any] = proj_codevector_dim
_lowerCAmelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Union[str, Any] = ctc_loss_reduction
_lowerCAmelCase : Union[str, Any] = ctc_zero_infinity
# pretraining loss
_lowerCAmelCase : str = replace_prob
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
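# Worked example of the property above: the feature extractor's overall
# downsampling ratio is the product of the conv strides, e.g. 320 input
# samples per output frame (20 ms at 16 kHz) for the default strides.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320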
| 300
|
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# both used with pi in the Casimir-force relation below
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
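# Worked numeric example (standalone) of the relation solved above:
# F = (hbar * c * pi^2 * A) / (240 * d^4). For illustrative plate values
# A = 1e-4 m^2 and d = 1e-6 m, the force is on the order of 1e-7 N.
from math import pi as _pi

_force = (1.054571817e-34 * 3e8 * _pi**2 * 1e-4) / (240 * (1e-6) ** 4)
print(f"{_force:.2e} N")  # ~1.30e-07 N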
| 300
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
_snake_case = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_snake_case = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_snake_case = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}), reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
], )
def snake_case__ ( self, __a, __a, __a=None):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__a, __a, sample_weight=__a)),
}
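# The metric above is a thin wrapper around sklearn; the equivalent direct
# call (mirroring Example 1 from the docstring) is:
from sklearn.metrics import matthews_corrcoef as _mcc

print(round(_mcc([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54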
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
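# Worked example (standalone) of the gumbel-temperature schedule applied at
# the end of each training step above: geometric decay, floored at the minimum.
max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995

for num_update_step in (0, 100_000, 1_000_000):
    temp = max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)
    print(num_update_step, round(temp, 3))  # 2.0, then ≈1.213, then clamped to 0.5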
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
    def prepare_dataset(batch ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300
| 1
|
def A ( _lowerCamelCase ):
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_lowerCAmelCase : Any = grid[0]
for row_n in range(1 , len(_lowerCamelCase ) ):
_lowerCAmelCase : Dict = grid[row_n]
_lowerCAmelCase : Tuple = fill_row(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = grid[row_n]
return grid[-1][-1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(_lowerCamelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
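# Standalone worked example of the DP above: each cell accumulates the
# cheaper of its top/left neighbour, so the minimum top-left-to-bottom-right
# path cost of the classic 3x3 grid is 7 (1+3+1+1+1).
def _min_path_sum(grid):
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    for row_n in range(1, len(grid)):
        grid[row_n][0] += grid[row_n - 1][0]
        for cell_n in range(1, len(grid[0])):
            grid[row_n][cell_n] += min(grid[row_n][cell_n - 1], grid[row_n - 1][cell_n])
    return grid[-1][-1]

assert _min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7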
| 300
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'timm_backbone'
def __init__( self, __a=None, __a=3, __a=True, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : int = backbone
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Any = features_only
_lowerCAmelCase : List[Any] = use_pretrained_backbone
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[int] = out_indices if out_indices is not None else (-1,)
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
    def create_tf_var(tensor , name , session ):
        _lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
        _lowerCAmelCase : Optional[int] = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
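# Quick check (standalone) of the PyTorch -> TF1 variable-name mapping applied
# above, using the subset of replacement pairs that fire on this name:
# dots become slashes, "layer." becomes "layer_", "weight" becomes "kernel".
_name = "encoder.layer.0.attention.self.query.weight"
for _patt, _repl in (("layer.", "layer_"), (".", "/"), ("weight", "kernel")):
    _name = _name.replace(_patt, _repl)
assert f"bert/{_name}" == "bert/encoder/layer_0/attention/self/query/kernel"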
| 300
| 1
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_snake_case = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
lowerCamelCase__ = "utf-8"
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True # deprecated
lowerCamelCase__ = None # deprecated
lowerCamelCase__ = 10 << 20 # 10MB
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = JsonConfig
def snake_case__ ( self):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
_lowerCAmelCase : int = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : Tuple = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : Union[str, Any] = data_files
if isinstance(__a, __a):
_lowerCAmelCase : str = [files]
_lowerCAmelCase : int = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : str = [files]
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
_lowerCAmelCase : Optional[Any] = self.config.features.arrow_schema.field(__a).type
_lowerCAmelCase : str = pa_table.append_column(__a, pa.array([None] * len(__a), type=__a))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : List[Any] = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__a)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__a, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
_lowerCAmelCase : Any = json.load(__a)
# We keep only the field we are interested in
_lowerCAmelCase : Union[str, Any] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__a, (list, tuple)):
_lowerCAmelCase : List[Any] = set().union(*[row.keys() for row in dataset])
_lowerCAmelCase : Any = {col: [row.get(__a) for row in dataset] for col in keys}
else:
_lowerCAmelCase : Dict = dataset
_lowerCAmelCase : Dict = pa.Table.from_pydict(__a)
yield file_idx, self._cast_table(__a)
# If the file has one json object per line
else:
with open(__a, "rb") as f:
_lowerCAmelCase : Tuple = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
_lowerCAmelCase : List[Any] = max(self.config.chunksize // 32, 16 << 10)
_lowerCAmelCase : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
_lowerCAmelCase : Any = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__a)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_lowerCAmelCase : int = batch.decode(self.config.encoding, errors=__a).encode("utf-8")
try:
while True:
try:
_lowerCAmelCase : Optional[Any] = paj.read_json(
io.BytesIO(__a), read_options=paj.ReadOptions(block_size=__a))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__a, pa.ArrowInvalid)
and "straddling" not in str(__a)
or block_size > len(__a)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(__a)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__a, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
_lowerCAmelCase : Dict = json.load(__a)
except json.JSONDecodeError:
logger.error(f"Failed to read file '{file}' with error {type(__a)}: {e}")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__a, __a): # list is the only sequence type supported in JSON
try:
_lowerCAmelCase : str = set().union(*[row.keys() for row in dataset])
_lowerCAmelCase : str = {col: [row.get(__a) for row in dataset] for col in keys}
_lowerCAmelCase : List[str] = pa.Table.from_pydict(__a)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"Failed to read file '{file}' with error {type(__a)}: {e}")
raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
yield file_idx, self._cast_table(__a)
break
else:
logger.error(f"Failed to read file '{file}' with error {type(__a)}: {e}")
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__a)
batch_idx += 1
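# Minimal sketch (standalone) of the fast path above: newline-delimited JSON
# bytes parsed straight into an Arrow table with pyarrow's JSON reader.
import io as _io
import pyarrow.json as _paj

_batch = b'{"a": 1}\n{"a": 2}\n'
_table = _paj.read_json(_io.BytesIO(_batch))
assert _table.num_rows == 2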
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
        edges.sort(key=lambda e: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            _lowerCAmelCase : Any = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            _lowerCAmelCase : List[Any] = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            _lowerCAmelCase : int = roota
            return roota
        return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _lowerCAmelCase : Union[str, Any] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        _lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
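# Self-contained sketch of the union-by-rank + path-compression structure the
# Borůvka-style routine above relies on (simplified dict-based version).
_parent, _rank = {}, {}

def _find(x):
    _parent.setdefault(x, x)
    _rank.setdefault(x, 0)
    if _parent[x] != x:
        _parent[x] = _find(_parent[x])  # path compression
    return _parent[x]

def _union(a, b):
    ra, rb = _find(a), _find(b)
    if ra == rb:
        return
    if _rank[ra] < _rank[rb]:
        ra, rb = rb, ra
    _parent[rb] = ra  # attach the shallower tree under the deeper root
    if _rank[ra] == _rank[rb]:
        _rank[ra] += 1

_union(1, 2)
_union(2, 3)
assert _find(1) == _find(3)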
| 300
| 1
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
|
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 0.028  # kg/mol for N2; the function expects SI units
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
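# Sanity check of the formula above: v_rms = sqrt(3RT/M). With M in kg/mol,
# nitrogen at 300 K gives roughly 517 m/s.
assert abs((3 * 8.3144598 * 300 / 0.028) ** 0.5 - 517) < 1.0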
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'bert-generation'
def __init__( self, __a=5_0358, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=0.02, __a=1E-12, __a=0, __a=2, __a=1, __a="absolute", __a=True, **__a, ):
'''simple docstring'''
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Optional[int] = position_embedding_type
_lowerCAmelCase : Any = use_cache
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
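# The property above is the feature encoder's overall downsampling factor:
# the product of the conv strides. With the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 == 320, i.e. one frame of latent
# features per 320 raw samples (20 ms of audio at 16 kHz):
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320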
| 300
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self, __a, __a=12, __a=7, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a=0.1, __a=0.1, __a=512, __a=0.02, __a=0, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : List[str] = use_input_mask
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : str = projection_dim
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : int = attention_dropout
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Optional[Any] = scope
_lowerCAmelCase : Optional[int] = bos_token_id
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
_lowerCAmelCase : Union[str, Any] = input_mask.numpy()
_lowerCAmelCase , _lowerCAmelCase : int = input_mask.shape
_lowerCAmelCase : List[str] = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(__a):
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : int = self.get_config()
return config, input_ids, tf.convert_to_tensor(__a)
def snake_case__ ( self):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFBlipTextModel(config=__a)
_lowerCAmelCase : List[Any] = model(__a, attention_mask=__a, training=__a)
_lowerCAmelCase : str = model(__a, training=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (TFBlipTextModel,) if is_tf_available() else ()
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BlipTextModelTester(self)
_lowerCAmelCase : Optional[int] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = TFBlipTextModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def snake_case__ ( self, __a=True):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=__a)
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
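# Usage sketch (the identifiers `BulletMenu` and `run` are assumptions for
# illustration): the menu is built from a prompt and a list of choices, and
# the final method above returns the index of the selected choice.
#
#     menu = BulletMenu("Which device do you want to use?", ["CPU", "GPU"])
#     chosen_index = menu.run(default_choice=0)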
| 300
| 1
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 0.0
lowerCamelCase__ = 1
lowerCamelCase__ = 1
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : str = []
for i in range(self.num_layers):
_lowerCAmelCase : Dict = self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase : List[Any] = FlaxResnetBlockaD(
in_channels=__a, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__a)
_lowerCAmelCase : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(__a)
_lowerCAmelCase : List[Any] = resnets
_lowerCAmelCase : List[str] = attentions
if self.add_downsample:
_lowerCAmelCase : Union[str, Any] = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self, __a, __a, __a, __a=True):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ()
for resnet, attn in zip(self.resnets, self.attentions):
_lowerCAmelCase : Any = resnet(__a, __a, deterministic=__a)
_lowerCAmelCase : Optional[Any] = attn(__a, __a, deterministic=__a)
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase : Optional[int] = self.downsamplers_a(__a)
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 0.0
lowerCamelCase__ = 1
lowerCamelCase__ = True
lowerCamelCase__ = jnp.floataa
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
for i in range(self.num_layers):
_lowerCAmelCase : Any = self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase : int = FlaxResnetBlockaD(
in_channels=__a, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__a)
_lowerCAmelCase : int = resnets
if self.add_downsample:
_lowerCAmelCase : int = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self, __a, __a, __a=True):
'''simple docstring'''
_lowerCAmelCase : Tuple = ()
for resnet in self.resnets:
_lowerCAmelCase : str = resnet(__a, __a, deterministic=__a)
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase : Any = self.downsamplers_a(__a)
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 0.0
lowerCamelCase__ = 1
lowerCamelCase__ = 1
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
_lowerCAmelCase : Union[str, Any] = []
for i in range(self.num_layers):
_lowerCAmelCase : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__a)
_lowerCAmelCase : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(__a)
_lowerCAmelCase : List[str] = resnets
_lowerCAmelCase : Any = attentions
if self.add_upsample:
_lowerCAmelCase : List[Any] = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self, __a, __a, __a, __a, __a=True):
'''simple docstring'''
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
_lowerCAmelCase : Any = res_hidden_states_tuple[-1]
_lowerCAmelCase : List[Any] = res_hidden_states_tuple[:-1]
_lowerCAmelCase : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
_lowerCAmelCase : List[Any] = resnet(__a, __a, deterministic=__a)
_lowerCAmelCase : Optional[Any] = attn(__a, __a, deterministic=__a)
if self.add_upsample:
_lowerCAmelCase : List[str] = self.upsamplers_a(__a)
return hidden_states
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 0.0
lowerCamelCase__ = 1
lowerCamelCase__ = True
lowerCamelCase__ = jnp.floataa
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = []
for i in range(self.num_layers):
_lowerCAmelCase : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase : str = self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase : Dict = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__a)
_lowerCAmelCase : Optional[int] = resnets
if self.add_upsample:
_lowerCAmelCase : Any = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)
def __call__( self, __a, __a, __a, __a=True):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
_lowerCAmelCase : Union[str, Any] = res_hidden_states_tuple[-1]
_lowerCAmelCase : Any = res_hidden_states_tuple[:-1]
_lowerCAmelCase : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
_lowerCAmelCase : Optional[Any] = resnet(__a, __a, deterministic=__a)
if self.add_upsample:
_lowerCAmelCase : Union[str, Any] = self.upsamplers_a(__a)
return hidden_states
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = 0.0
lowerCamelCase__ = 1
lowerCamelCase__ = 1
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = [
FlaxResnetBlockaD(
in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
]
_lowerCAmelCase : Any = []
for _ in range(self.num_layers):
_lowerCAmelCase : int = FlaxTransformeraDModel(
in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(__a)
_lowerCAmelCase : List[str] = FlaxResnetBlockaD(
in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__a)
_lowerCAmelCase : Optional[Any] = resnets
_lowerCAmelCase : Optional[int] = attentions
def __call__( self, __a, __a, __a, __a=True):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.resnets[0](__a, __a)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
_lowerCAmelCase : Union[str, Any] = attn(__a, __a, deterministic=__a)
_lowerCAmelCase : str = resnet(__a, __a, deterministic=__a)
return hidden_states
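# The up blocks above consume skip activations in reverse order: each loop
# iteration pops the most recent entry off `res_hidden_states_tuple` and
# concatenates it with the running hidden states along the channel axis
# (channels-last in Flax). A minimal shape check of that step, with toy
# sizes chosen only for illustration:
_h = jnp.zeros((1, 8, 8, 320))    # current hidden states
_res = jnp.zeros((1, 8, 8, 320))  # matching skip connection from a down block
assert jnp.concatenate((_h, _res), axis=-1).shape == (1, 8, 8, 640)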
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
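# A minimal sketch of the lazy-import pattern used above. This mirrors the
# spirit, not the letter, of transformers' `_LazyModule` (which also handles
# submodule access, `__dir__` and module specs); the class name below is
# illustrative. Attribute access triggers the real import only on first use:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The submodule is imported here, on first attribute access.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)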
| 300
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def A ( _lowerCamelCase , _lowerCamelCase = 16 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCAmelCase : List[str] = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : int = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : List[str] = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : str = 8
else:
_lowerCAmelCase : Optional[Any] = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
_lowerCAmelCase : int = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase ) == "1":
_lowerCAmelCase : Tuple = 2
# New Code #
_lowerCAmelCase : Any = int(args.gradient_accumulation_steps )
# Initialize accelerator
_lowerCAmelCase : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Tuple = config["lr"]
_lowerCAmelCase : List[str] = int(config["num_epochs"] )
_lowerCAmelCase : Any = int(config["seed"] )
_lowerCAmelCase : Union[str, Any] = int(config["batch_size"] )
_lowerCAmelCase : Union[str, Any] = evaluate.load("glue" , "mrpc" )
set_seed(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_dataloaders(_lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : Any = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : Union[str, Any] = AdamW(params=model.parameters() , lr=_lowerCamelCase )
# Instantiate scheduler
_lowerCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # We currently do not support, nor advise, gradient accumulation on TPUs, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_lowerCamelCase ):
_lowerCAmelCase : Tuple = model(**_lowerCamelCase )
_lowerCAmelCase : Dict = output.loss
accelerator.backward(_lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Dict = model(**_lowerCamelCase )
_lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
_lowerCAmelCase : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCamelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_lowerCAmelCase : int = parser.parse_args()
_lowerCAmelCase : int = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
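# The pattern this script adds over the plain example, in isolation (a sketch,
# not part of the script): `Accelerator(gradient_accumulation_steps=k)` makes
# the `accelerator.accumulate(model)` context manager defer gradient
# synchronization on all but every k-th step, so the loop body is unchanged:
#
#     accelerator = Accelerator(gradient_accumulation_steps=4)
#     for batch in train_dataloader:
#         with accelerator.accumulate(model):
#             loss = model(**batch).loss
#             accelerator.backward(loss)
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()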
| 300
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a = 0):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = row, column
_lowerCAmelCase : str = [[default_value for c in range(__a)] for r in range(__a)]
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
_lowerCAmelCase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowerCAmelCase : List[str] = max(__a, len(str(__a)))
_lowerCAmelCase : Union[str, Any] = f"%{max_element_length}s"
# Make string and return
def single_line(__a) -> str:
nonlocal string_format_identifier
_lowerCAmelCase : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(__a) for row_vector in self.array)
return s
def __repr__( self):
'''simple docstring'''
return str(self)
def snake_case__ ( self, __a):
'''simple docstring'''
if not (isinstance(__a, (list, tuple)) and len(__a) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, __a):
'''simple docstring'''
        assert self.validate_indices(__a)
return self.array[loc[0]][loc[1]]
def __setitem__( self, __a, __a):
'''simple docstring'''
        assert self.validate_indices(__a)
_lowerCAmelCase : Union[str, Any] = value
def __add__( self, __a):
'''simple docstring'''
assert isinstance(__a, __a)
assert self.row == another.row and self.column == another.column
# Add
_lowerCAmelCase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c] + another[r, c]
return result
def __neg__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : str = -self[r, c]
return result
def __sub__( self, __a):
'''simple docstring'''
return self + (-another)
def __mul__( self, __a):
'''simple docstring'''
if isinstance(__a, (int, float)): # Scalar multiplication
_lowerCAmelCase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Optional[Any] = self[r, c] * another
return result
elif isinstance(__a, __a): # Matrix multiplication
assert self.column == another.row
_lowerCAmelCase : List[str] = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowerCAmelCase : Optional[Any] = f"Unsupported type given for another ({type(__a)})"
raise TypeError(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c]
return result
def snake_case__ ( self, __a, __a):
'''simple docstring'''
assert isinstance(__a, __a) and isinstance(__a, __a)
        assert self.row == self.column == u.row == v.row  # A must be square; u, v must be column vectors of matching length
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowerCAmelCase : int = v.transpose()
_lowerCAmelCase : str = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
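    # The method above is the Sherman–Morrison identity: if this matrix holds
    # A^{-1}, then
    #     (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u),
    # which is exactly `self - ((self * u) * (v_t * self) * (1.0 / factor))`
    # with factor = 1 + v^T A^{-1} u; the update returns None precisely when
    # that factor is zero, i.e. when A + u v^T is singular.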
# Testing
if __name__ == "__main__":
    def testa( ):
'''simple docstring'''
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}" )
def A ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 300
| 1
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def snake_case__ ( self, __a=0):
'''simple docstring'''
_lowerCAmelCase : int = floats_tensor((1, 3, 128, 128), rng=random.Random(__a))
_lowerCAmelCase : Optional[Any] = np.random.RandomState(__a)
_lowerCAmelCase : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCAmelCase : Tuple = pipe(**__a).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Dict = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Any = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**__a).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : List[Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
# warmup pass to apply optimizations
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs())
_lowerCAmelCase : str = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**__a).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Dict = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCAmelCase : List[str] = pipe(**__a).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Optional[int] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : int = self.get_dummy_inputs()
_lowerCAmelCase : Optional[Any] = pipe(**__a).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Dict = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : int = pipe(**__a).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : List[str] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ort.SessionOptions()
_lowerCAmelCase : Dict = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_lowerCAmelCase : Dict = init_image.resize((768, 512))
# using the PNDM scheduler by default
_lowerCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A fantasy landscape, trending on artstation"
_lowerCAmelCase : str = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=__a, output_type="np", )
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase : int = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_lowerCAmelCase : Tuple = init_image.resize((768, 512))
_lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=__a, safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = "A fantasy landscape, trending on artstation"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Optional[int] = pipe(
prompt=__a, image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=__a, output_type="np", )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase : Any = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(__a, __a):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
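# Usage sketch: this builder backs `load_dataset("pandas", ...)` in the
# datasets library, reading pickled DataFrames into Arrow tables (a single
# TRAIN split unless `data_files` is a dict keyed by split). The file name
# below is illustrative:
#
#     from datasets import load_dataset
#     ds = load_dataset("pandas", data_files="my_frame.pkl")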
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase) = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_snake_case = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_snake_case = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_snake_case = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage="http://www.cs.umd.edu/~snover/tercom/", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}), codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"], reference_urls=[
"https://github.com/jhclark/tercom",
], )
def snake_case__ ( self, __a, __a, __a = False, __a = False, __a = False, __a = False, ):
'''simple docstring'''
_lowerCAmelCase : str = len(references[0])
if any(len(__a) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
_lowerCAmelCase : str = [[refs[i] for refs in references] for i in range(__a)]
_lowerCAmelCase : Tuple = TER(
normalized=__a, no_punct=__a, asian_support=__a, case_sensitive=__a, )
_lowerCAmelCase : Optional[int] = sb_ter.corpus_score(__a, __a)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rag'
lowerCamelCase__ = True
def __init__( self, __a=None, __a=True, __a=None, __a=None, __a=None, __a=None, __a=None, __a=" / ", __a=" // ", __a=5, __a=300, __a=768, __a=8, __a="wiki_dpr", __a="train", __a="compressed", __a=None, __a=None, __a=False, __a=False, __a=0.0, __a=True, __a=False, __a=False, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(
bos_token_id=__a, pad_token_id=__a, eos_token_id=__a, decoder_start_token_id=__a, forced_eos_token_id=__a, is_encoder_decoder=__a, prefix=__a, vocab_size=__a, **__a, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : List[str] = kwargs.pop("question_encoder")
_lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type")
_lowerCAmelCase : int = kwargs.pop("generator")
_lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : int = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : List[Any] = reduce_loss
_lowerCAmelCase : Any = label_smoothing
_lowerCAmelCase : Optional[int] = exclude_bos_score
_lowerCAmelCase : Optional[Any] = do_marginalize
_lowerCAmelCase : Any = title_sep
_lowerCAmelCase : Any = doc_sep
_lowerCAmelCase : Optional[int] = n_docs
_lowerCAmelCase : Optional[Any] = max_combined_length
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : List[str] = dataset_split
_lowerCAmelCase : Optional[Any] = index_name
_lowerCAmelCase : Dict = retrieval_vector_size
_lowerCAmelCase : Union[str, Any] = retrieval_batch_size
_lowerCAmelCase : Optional[int] = passages_path
_lowerCAmelCase : Dict = index_path
_lowerCAmelCase : Tuple = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : str = do_deduplication
_lowerCAmelCase : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a)
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Union[str, Any] = self.question_encoder.to_dict()
_lowerCAmelCase : Any = self.generator.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
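# --- Illustrative usage sketch (added; not part of the dataset row above). ---
# A minimal sketch assuming the upstream `transformers` API this obfuscated class
# mirrors: `RagConfig`, `AutoConfig`, and the classmethod
# `RagConfig.from_question_encoder_generator_configs` (the de-obfuscated name of
# the `cls(question_encoder=..., generator=...)` helper defined above).
from transformers import AutoConfig, RagConfig

# Build the two sub-configs, then combine them into one composite RAG config.
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)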
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'lxmert'
lowerCamelCase__ = {}
def __init__( self, __a=3_0522, __a=768, __a=12, __a=9500, __a=1600, __a=400, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=9, __a=5, __a=5, __a=2048, __a=4, __a=6.67, __a=True, __a=True, __a=True, __a=True, __a=True, __a=True, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Tuple = num_qa_labels
_lowerCAmelCase : Union[str, Any] = num_object_labels
_lowerCAmelCase : Optional[Any] = num_attr_labels
_lowerCAmelCase : Any = l_layers
_lowerCAmelCase : Any = x_layers
_lowerCAmelCase : Optional[Any] = r_layers
_lowerCAmelCase : Any = visual_feat_dim
_lowerCAmelCase : List[str] = visual_pos_dim
_lowerCAmelCase : Optional[Any] = visual_loss_normalizer
_lowerCAmelCase : Optional[int] = task_matched
_lowerCAmelCase : str = task_mask_lm
_lowerCAmelCase : Optional[Any] = task_obj_predict
_lowerCAmelCase : str = task_qa
_lowerCAmelCase : Union[str, Any] = visual_obj_loss
_lowerCAmelCase : Union[str, Any] = visual_attr_loss
_lowerCAmelCase : Dict = visual_feat_loss
_lowerCAmelCase : List[str] = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**__a)
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( a):
@staticmethod
@abstractmethod
def snake_case__ ( __a):
'''simple docstring'''
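# Abstract hook: subclasses register their sub-command and arguments on the given
# ArgumentParser here; the second abstract hook below executes the command.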
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
'''simple docstring'''
raise NotImplementedError()
| 300
| 1
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_snake_case = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_snake_case = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_snake_case = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
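# Fitness: count the positions where the candidate's gene matches the target string.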
_lowerCAmelCase : Tuple = len([g for position, g in enumerate(_lowerCamelCase ) if g == main_target[position]] )
return (item, float(_lowerCamelCase ))
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
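# Single-point crossover: split both parents at a random index and swap their tails.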
_lowerCAmelCase : Optional[Any] = random.randint(0 , len(_lowerCamelCase ) - 1 )
_lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
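# With probability MUTATION_PROBABILITY, overwrite one gene with a random pick from the gene pool.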
_lowerCAmelCase : int = list(_lowerCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_lowerCAmelCase : List[str] = random.choice(_lowerCamelCase )
return "".join(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : str = []
# Generate more children proportionally to the fitness score.
_lowerCAmelCase : Any = int(parent_a[1] * 100 ) + 1
_lowerCAmelCase : Tuple = 10 if child_n >= 10 else child_n
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : List[str] = population_score[random.randint(0 , _lowerCamelCase )][0]
_lowerCAmelCase , _lowerCAmelCase = crossover(parent_a[0] , _lowerCamelCase )
# Append new string to the population list.
pop.append(mutate(_lowerCamelCase , _lowerCamelCase ) )
pop.append(mutate(_lowerCamelCase , _lowerCamelCase ) )
return pop
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True ):
'''simple docstring'''
if N_POPULATION < N_SELECTED:
_lowerCAmelCase : Optional[int] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(_lowerCamelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
_lowerCAmelCase : List[Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_lowerCAmelCase : Optional[Any] = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(_lowerCamelCase )
# Generate random starting population.
_lowerCAmelCase : Tuple = []
for _ in range(_lowerCamelCase ):
population.append("".join([random.choice(_lowerCamelCase ) for i in range(len(_lowerCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
_lowerCAmelCase , _lowerCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowerCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_lowerCAmelCase : List[str] = [evaluate(_lowerCamelCase , _lowerCamelCase ) for item in population]
# Check if there is a matching evolution.
_lowerCAmelCase : int = sorted(_lowerCamelCase , key=lambda x : x[1] , reverse=_lowerCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_lowerCAmelCase : Optional[int] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowerCamelCase )
# Normalize population score to be between 0 and 1.
_lowerCAmelCase : Union[str, Any] = [
(item, score / len(_lowerCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_lowerCamelCase ):
population.extend(select(population_score[int(_lowerCamelCase )] , _lowerCamelCase , _lowerCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowerCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
_snake_case = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_snake_case = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_snake_case, _snake_case, _snake_case = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : int = ""
else:
_lowerCAmelCase : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : int = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ViTConfig()
_lowerCAmelCase : str = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = int(vit_name[-12:-10] )
_lowerCAmelCase : str = int(vit_name[-9:-6] )
else:
_lowerCAmelCase : List[str] = 1_000
_lowerCAmelCase : int = "huggingface/label-files"
_lowerCAmelCase : Dict = "imagenet-1k-id2label.json"
_lowerCAmelCase : Dict = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[int] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : str = int(vit_name[-6:-4] )
_lowerCAmelCase : List[str] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCAmelCase : str = 192
_lowerCAmelCase : Union[str, Any] = 768
_lowerCAmelCase : str = 12
_lowerCAmelCase : Any = 3
elif vit_name[9:].startswith("small" ):
_lowerCAmelCase : Any = 384
_lowerCAmelCase : Any = 1_536
_lowerCAmelCase : List[str] = 12
_lowerCAmelCase : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCAmelCase : Optional[Any] = 768
_lowerCAmelCase : str = 2_304
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : List[str] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCAmelCase : Optional[Any] = 1_024
_lowerCAmelCase : List[str] = 4_096
_lowerCAmelCase : Dict = 24
_lowerCAmelCase : int = 16
elif vit_name[4:].startswith("huge" ):
_lowerCAmelCase : Union[str, Any] = 1_280
_lowerCAmelCase : Optional[int] = 5_120
_lowerCAmelCase : Optional[Any] = 32
_lowerCAmelCase : str = 16
# load original model from timm
_lowerCAmelCase : List[Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCAmelCase : Optional[int] = ViTModel(_lowerCamelCase ).eval()
else:
_lowerCAmelCase : Optional[int] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCAmelCase : Tuple = DeiTImageProcessor(size=config.image_size )
else:
_lowerCAmelCase : Dict = ViTImageProcessor(size=config.image_size )
_lowerCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase : Union[str, Any] = encoding["pixel_values"]
_lowerCAmelCase : List[str] = model(_lowerCamelCase )
if base_model:
_lowerCAmelCase : List[str] = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
_lowerCAmelCase : Any = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 1
|
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
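# Squared Euclidean distance; the square root is skipped since only relative ordering
# matters (the final answer takes ** 0.5 once at the end).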
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def A ( _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
return sorted(_lowerCamelCase , key=lambda x : x[column] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=float("inf" ) ):
'''simple docstring'''
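# Brute force: O(n^2) scan over every pair of points.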
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowerCamelCase ):
_lowerCAmelCase : int = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
_lowerCAmelCase : Any = current_dis
return min_dis
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=float("inf" ) ):
'''simple docstring'''
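# Within the strip (sorted by y), each point only needs comparing against a constant
# number of neighbours (at most 6), which keeps the merge step linear.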
for i in range(min(6 , points_counts - 1 ) , _lowerCamelCase ):
for j in range(max(0 , i - 6 ) , _lowerCamelCase ):
_lowerCAmelCase : Tuple = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
_lowerCAmelCase : Any = current_dis
return min_dis
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
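# Base case: brute force is cheapest for three or fewer points.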
if points_counts <= 3:
return dis_between_closest_pair(_lowerCamelCase , _lowerCamelCase )
# recursion
_lowerCAmelCase : str = points_counts // 2
_lowerCAmelCase : Any = closest_pair_of_points_sqr(
_lowerCamelCase , points_sorted_on_y[:mid] , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = closest_pair_of_points_sqr(
_lowerCamelCase , points_sorted_on_y[mid:] , points_counts - mid )
_lowerCAmelCase : Tuple = min(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = dis_between_closest_in_strip(
_lowerCamelCase , len(_lowerCamelCase ) , _lowerCamelCase )
return min(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = column_based_sort(_lowerCamelCase , column=0 )
_lowerCAmelCase : Optional[Any] = column_based_sort(_lowerCamelCase , column=1 )
return (
closest_pair_of_points_sqr(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
) ** 0.5
if __name__ == "__main__":
_snake_case = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
])
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=__a, )
assert hasattr(self, "env")
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_lowerCAmelCase : str = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=__a, instance_count=__a, instance_type=self.instance_type, debugger_hook_config=__a, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=__a, py_version="py36", )
def snake_case__ ( self, __a):
'''simple docstring'''
TrainingJobAnalytics(__a).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.create_estimator(__a)
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase : str = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_lowerCAmelCase : str = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, __a)
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 1
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( a , a , a):
lowerCamelCase__ = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self, __a, __a, __a = None, __a = 5_0257, __a = 1024, __a = 768, __a = 12, __a = 12, __a = None, __a = "gelu_new", __a = 0.1, __a = 0.1, __a = 0.1, __a = 1E-5, __a = 0.02, __a = True, __a = True, __a = False, __a = False, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal.")
_lowerCAmelCase : Union[str, Any] = prefix_inner_dim
_lowerCAmelCase : List[str] = prefix_hidden_dim
_lowerCAmelCase : Optional[Any] = (
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCAmelCase : str = (
nn.Linear(self.prefix_hidden_dim, __a) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCAmelCase : List[str] = GPTaConfig(
vocab_size=__a, n_positions=__a, n_embd=__a, n_layer=__a, n_head=__a, n_inner=__a, activation_function=__a, resid_pdrop=__a, embd_pdrop=__a, attn_pdrop=__a, layer_norm_epsilon=__a, initializer_range=__a, scale_attn_weights=__a, use_cache=__a, scale_attn_by_inverse_layer_idx=__a, reorder_and_upcast_attn=__a, )
_lowerCAmelCase : Optional[Any] = GPTaLMHeadModel(__a)
def snake_case__ ( self, __a, __a, __a = None, __a = None, ):
'''simple docstring'''
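# Project the prefix into the embedding space and prepend it to the token embeddings
# before running the GPT-2 forward pass.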
_lowerCAmelCase : Tuple = self.transformer.transformer.wte(__a)
_lowerCAmelCase : Tuple = self.encode_prefix(__a)
_lowerCAmelCase : Any = self.decode_prefix(__a)
_lowerCAmelCase : int = torch.cat((prefix_embeds, embedding_text), dim=1)
if labels is not None:
_lowerCAmelCase : Union[str, Any] = self.get_dummy_token(input_ids.shape[0], input_ids.device)
_lowerCAmelCase : Union[str, Any] = torch.cat((dummy_token, input_ids), dim=1)
_lowerCAmelCase : int = self.transformer(inputs_embeds=__a, labels=__a, attention_mask=__a)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.zeros(__a, self.prefix_length, dtype=torch.int64, device=__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.encode_prefix(__a)
@torch.no_grad()
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = torch.split(__a, 1, dim=0)
_lowerCAmelCase : int = []
_lowerCAmelCase : List[Any] = []
for feature in features:
_lowerCAmelCase : Any = self.decode_prefix(feature.to(__a)) # back to the clip feature
# Only support beam search for now
_lowerCAmelCase , _lowerCAmelCase = self.generate_beam(
input_embeds=__a, device=__a, eos_token_id=__a)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
_lowerCAmelCase : int = torch.stack(__a)
_lowerCAmelCase : List[str] = torch.stack(__a)
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self, __a=None, __a=None, __a=None, __a = 5, __a = 67, __a = 1.0, __a = None, ):
'''simple docstring'''
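# Beam search over GPT-2 logits: `scores` accumulates each beam's log-probability and
# `seq_lengths` tracks per-beam length for length normalisation.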
_lowerCAmelCase : str = eos_token_id
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Optional[Any] = torch.ones(__a, device=__a, dtype=torch.int)
_lowerCAmelCase : Dict = torch.zeros(__a, device=__a, dtype=torch.bool)
if input_embeds is not None:
_lowerCAmelCase : Dict = input_embeds
else:
_lowerCAmelCase : Any = self.transformer.transformer.wte(__a)
for i in range(__a):
_lowerCAmelCase : Tuple = self.transformer(inputs_embeds=__a)
_lowerCAmelCase : Dict = outputs.logits
_lowerCAmelCase : Optional[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCAmelCase : Any = logits.softmax(-1).log()
if scores is None:
_lowerCAmelCase , _lowerCAmelCase = logits.topk(__a, -1)
_lowerCAmelCase : Tuple = generated.expand(__a, *generated.shape[1:])
_lowerCAmelCase , _lowerCAmelCase = next_tokens.permute(1, 0), scores.squeeze(0)
if tokens is None:
_lowerCAmelCase : Dict = next_tokens
else:
_lowerCAmelCase : Tuple = tokens.expand(__a, *tokens.shape[1:])
_lowerCAmelCase : str = torch.cat((tokens, next_tokens), dim=1)
else:
_lowerCAmelCase : Union[str, Any] = -float(np.inf)
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : List[Any] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCAmelCase : List[Any] = scores_sum / seq_lengths[:, None]
_lowerCAmelCase , _lowerCAmelCase = scores_sum_average.view(-1).topk(__a, -1)
_lowerCAmelCase : List[Any] = next_tokens // scores_sum.shape[1]
_lowerCAmelCase : List[Any] = seq_lengths[next_tokens_source]
_lowerCAmelCase : Dict = next_tokens % scores_sum.shape[1]
_lowerCAmelCase : Tuple = next_tokens.unsqueeze(1)
_lowerCAmelCase : List[str] = tokens[next_tokens_source]
_lowerCAmelCase : str = torch.cat((tokens, next_tokens), dim=1)
_lowerCAmelCase : Optional[int] = generated[next_tokens_source]
_lowerCAmelCase : str = scores_sum_average * seq_lengths
_lowerCAmelCase : Tuple = is_stopped[next_tokens_source]
_lowerCAmelCase : Union[str, Any] = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
_lowerCAmelCase : Union[str, Any] = torch.cat((generated, next_token_embed), dim=1)
_lowerCAmelCase : Optional[Any] = is_stopped + next_tokens.eq(__a).squeeze()
if is_stopped.all():
break
_lowerCAmelCase : Dict = scores / seq_lengths
_lowerCAmelCase : List[Any] = scores.argsort(descending=__a)
# tokens tensors are already padded to max_seq_length
_lowerCAmelCase : Optional[int] = [tokens[i] for i in order]
_lowerCAmelCase : int = torch.stack(__a, dim=0)
_lowerCAmelCase : int = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
return output_texts, seq_lengths
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 1
|
from __future__ import annotations
_snake_case = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class UpperCAmelCase_ :
    def __init__( self, graph, source_vertex):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search( self):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path( self, target_vertex):
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    _snake_case = UpperCAmelCase_(_snake_case, "G")
    _snake_case.breadth_first_search()
    print(_snake_case.shortest_path("D"))
    print(_snake_case.shortest_path("G"))
    print(_snake_case.shortest_path("Foo"))  # unreachable vertex: raises ValueError
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
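        # (Behaviour exercised above: ``entailment_id`` is the index of the first
        # label whose lower-cased name starts with "entail"; it falls back to -1
        # for generic LABEL_N label schemes.)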
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 1
|
def A ( _lowerCamelCase ):
'''simple docstring'''
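    # Heap's algorithm (iterative form): generate every permutation of the
    # input sequence and return them as a list of tuples.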
if len(_lowerCamelCase ) <= 1:
return [tuple(_lowerCamelCase )]
    res = []  # collects each permutation as a tuple
    def generate(n, arr):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
generate(len(_lowerCamelCase ) , _lowerCamelCase )
return res
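# quick sanity check (illustrative, not part of the original file):
# A([1, 2, 3]) -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]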
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
    _snake_case = [int(item) for item in _snake_case.split(",")]
    print(A(_snake_case))
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
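        # both grids are geometric (log-linear) schedules between sigma_min and
        # sigma_max, i.e. sigma(t) = sigma_min * (sigma_max / sigma_min) ** t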
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be on the same device as the tensor they
        # index, so we use cpu here, as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
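        # concretely, the predictor update implemented below is
        #   x_{t-1} = x_t + (sigma_t**2 - sigma_{t-1}**2) * score(x_t) + sqrt(sigma_t**2 - sigma_{t-1}**2) * z,  z ~ N(0, I)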
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
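        # i.e. step_size = 2 * (snr * ||z|| / ||score||) ** 2, the annealed
        # Langevin dynamics step size from the corrector algorithm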
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
| 300
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self, __a):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
_lowerCAmelCase : str = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = "sshleifer/tiny-gpt2"
_lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], eager_mode=__a, multi_process=__a, )
_lowerCAmelCase : Union[str, Any] = TensorFlowBenchmark(__a)
_lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "sgugger/tiny-distilbert-classification"
_lowerCAmelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, only_pretrain_model=__a, )
_lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(__a)
_lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
_lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : Dict = TensorFlowBenchmark(__a)
_lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = "sshleifer/tiny-gpt2"
_lowerCAmelCase : Any = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], eager_mode=__a, multi_process=__a, )
_lowerCAmelCase : int = TensorFlowBenchmark(__a, [config])
_lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = "sshleifer/tiny-gpt2"
_lowerCAmelCase : int = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : Tuple = TensorFlowBenchmark(__a, [config])
_lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : int = TensorFlowBenchmark(__a)
_lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = "sshleifer/tiny-gpt2"
_lowerCAmelCase : str = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(__a, [config])
_lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = "patrickvonplaten/t5-tiny-random"
_lowerCAmelCase : List[str] = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], multi_process=__a, )
_lowerCAmelCase : Dict = TensorFlowBenchmark(__a, configs=[config])
_lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "sshleifer/tiny-gpt2"
_lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__a, inference=__a, sequence_lengths=[8], batch_sizes=[1], use_xla=__a, multi_process=__a, )
_lowerCAmelCase : Optional[int] = TensorFlowBenchmark(__a)
_lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=__a, save_to_csv=__a, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(__a, "inf_time.csv"), inference_memory_csv_file=os.path.join(__a, "inf_mem.csv"), env_info_csv_file=os.path.join(__a, "env.csv"), multi_process=__a, )
_lowerCAmelCase : Optional[int] = TensorFlowBenchmark(__a)
benchmark.run()
self.assertTrue(Path(os.path.join(__a, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__a, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__a, "env.csv")).exists())
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__a):
self.assertTrue(hasattr(__a, "sequential"))
self.assertTrue(hasattr(__a, "cumulative"))
self.assertTrue(hasattr(__a, "current"))
self.assertTrue(hasattr(__a, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=__a, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(__a, "log.txt"), log_print=__a, trace_memory_line_by_line=__a, eager_mode=__a, multi_process=__a, )
_lowerCAmelCase : Tuple = TensorFlowBenchmark(__a)
_lowerCAmelCase : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(__a, "log.txt")).exists())
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
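# One possible shape for the three stubs above (illustrative only, so as not to
# pre-empt the exercise): each mirrors `random` but draws from a single character
# class, e.g. "".join(secrets.choice(ascii_lowercase) for _ in range(count))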
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
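# e.g. "Ab3$efgh" passes: at least 8 characters with uppercase, lowercase,
# a digit and a punctuation character all present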
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 300
| 1
|
from typing import Any
import numpy as np
def A ( _lowerCamelCase ):
'''simple docstring'''
return np.array_equal(_lowerCamelCase , matrix.conjugate().T )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = v.conjugate().T
_lowerCAmelCase : str = v_star.dot(_lowerCamelCase )
assert isinstance(_lowerCamelCase , np.ndarray )
return (v_star_dot.dot(_lowerCamelCase )) / (v_star.dot(_lowerCamelCase ))
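# Rayleigh quotient: R(M, v) = (v* M v) / (v* v); it is real-valued whenever M
# is Hermitian, which the assertions in the tests below rely on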
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
_lowerCAmelCase : Optional[Any] = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCamelCase ), F"{a} is not hermitian."
print(rayleigh_quotient(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : str = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCamelCase ), F"{a} is not hermitian."
assert rayleigh_quotient(_lowerCamelCase , _lowerCamelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
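    # at runtime the module is replaced by a lazy proxy, so the heavy framework
    # imports above only happen on first attribute access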
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=4, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : List[Any] = use_attention_mask
_lowerCAmelCase : str = use_token_type_ids
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : Union[str, Any] = type_sequence_label_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Any = num_choices
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Tuple = None
if self.use_attention_mask:
_lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : int = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__a, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = True
lowerCamelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = FlaxRobertaModelTester(self)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class_name.from_pretrained("roberta-base", from_pt=__a)
_lowerCAmelCase : Tuple = model(np.ones((1, 1)))
self.assertIsNotNone(__a)
| 300
|
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c, and the
# value of pi used by the Casimir force function below
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 1
|
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_lowerCAmelCase : Optional[int] = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
_lowerCAmelCase : List[str] = str(bin(_lowerCamelCase ) )[2:]
_lowerCAmelCase : Dict = max(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_lowerCamelCase ) , b_binary.zfill(_lowerCamelCase ) ) )
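# e.g. for inputs 25 (0b11001) and 32 (0b100000) this builds "0b111001",
# i.e. the bitwise OR 25 | 32 == 57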
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output length indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
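            # (setting a 1 at index output_length - 1 and taking a reversed
            # cumulative sum marks every position before output_length as attended)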
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300
| 1
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=4, ):
'''simple docstring'''
_lowerCAmelCase : int = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Optional[int] = use_attention_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : List[str] = num_choices
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Any = None
if self.use_attention_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : Any = None
if self.use_token_type_ids:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__a, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = config_and_inputs
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = FlaxAlbertModelTester(self)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : int = model_class_name.from_pretrained("albert-base-v2")
_lowerCAmelCase : Union[str, Any] = model(np.ones((1, 1)))
self.assertIsNotNone(__a)
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2")
_lowerCAmelCase : int = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
_lowerCAmelCase : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_lowerCAmelCase : Tuple = model(__a, attention_mask=__a)[0]
_lowerCAmelCase : Dict = (1, 11, 768)
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], __a, atol=1E-4))
| 300
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
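# Convert a PyTorch BertModel checkpoint into a TensorFlow 1.x checkpoint,
# transposing linear weights and remapping parameter names along the way.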
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"
    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                # TF stores linear kernels transposed relative to PyTorch
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 300
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
])
assert np.abs(expected_slice - image_slice).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0
        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 300
|
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        # Make all edge weights distinct by bumping duplicates upwards.
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")
    def get_edges(self):
        output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
    def get_vertices(self):
return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
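    # Illustrative (hypothetical) usage: Graph.build(vertices=[1, 2], edges=[(1, 2, 3)])
    # returns a graph with a single weight-3 edge between vertices 1 and 2.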
    class UnionFind(object):
        def __init__(self):
            self.parent = {}
            self.rank = {}
        def __len__(self):
            return len(self.parent)
        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]
        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
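    # Boruvka's MST: in each round, every component selects its cheapest
    # outgoing edge, and components are merged until only one (the MST) remains.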
    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 300
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
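    # prepare_inputs yields PIL images by default, raw numpy arrays with
    # numpify=True, and torch tensors with torchify=True.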
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
| 300
|
_snake_case = 8.3144598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
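# v_rms = sqrt(3RT/M): equate the average kinetic energy (3/2)kT of a molecule
# with (1/2)mv^2 and scale to molar quantities.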
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of nitrogen gas, in kg/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 300
| 1
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
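# Pigeonhole sort runs in O(n + k) time and O(k) extra space,
# where k = max(a) - min(a) + 1.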
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
    main()
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
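    # Total downsampling factor of the convolutional feature encoder: the
    # product of the conv strides, i.e. raw audio samples per output frame.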
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 300
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=32, sample_size=32, scaling_factor=0.18215, ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling=True):
        self.use_tiling = use_tiling
    def disable_tiling(self):
        self.enable_tiling(False)
    def enable_slicing(self):
        self.use_slicing = True
    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}
        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x, return_dict=True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z, return_dict=True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
@apply_forward_hook
    def decode(self, z, return_dict=True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
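    # blend_v / blend_h linearly cross-fade the overlapping border of two
    # neighbouring tiles to hide seams in tiled encoding/decoding.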
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x, return_dict=True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z, return_dict=True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample, sample_posterior=False, return_dict=True, generator=None, ):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
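# A terminal bullet menu driven by arrow and number keys; falls back to a
# plain input() prompt when running inside Google Colab.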
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    def __init__(self, prompt=None, choices=[]):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
    def print_choice(self, index):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
reset_cursor()
    def move_direction(self, direction, num_spaces=1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
    def move_up(self):
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
    def move_down(self):
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
    def select(self):
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
    def interrupt(self):
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice=0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
for i in range(len(self.choices)):
            self.print_choice(i)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
| 300
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning, )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
return inputs
    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
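    # as_target_processor temporarily makes the tokenizer the active processor
    # so label text can be prepared through the same __call__/pad interface.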
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 1
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        # `compute_bleu` comes from the vendored TensorFlow NMT script imported at the
        # top of the metric file (see `codebase_urls` above).
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
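

# Illustrative usage (an added sketch, not part of the metric itself). `max_order` and
# `smooth` are forwarded straight to `compute_bleu`; Lin & Och (2004) smoothing mainly
# matters for short segments with zero higher-order n-gram matches:
#
#     import datasets
#     bleu = datasets.load_metric("bleu")  # on newer stacks: evaluate.load("bleu")
#     predictions = [["the", "cat", "sat"]]
#     references = [[["the", "cat", "sat", "down"]]]
#     print(bleu.compute(predictions=predictions, references=references, smooth=True))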
| 300
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row, column, default_value = 0):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self):
        '''simple docstring'''
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self):
'''simple docstring'''
return str(self)
    def validate_indicies(self, loc):
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc, value):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        '''simple docstring'''
        return self + (-another)
    def __mul__(self, another):
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u, v):
        '''simple docstring'''
        # Sherman-Morrison formula: given A^(-1) (which is `self` here) and column
        # vectors u, v, the updated inverse is
        #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1():
        '''simple docstring'''
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2():
        '''simple docstring'''
        import doctest

        doctest.testmod()

    test2()
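
    def test3():
        # Added sketch (not in the original): a quick numeric sanity check.
        # With A = I (so A^(-1) = I as well), the Sherman-Morrison result times
        # (A + u v^T) should print a matrix close to the identity.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        updated = ainv.sherman_morrison(u, v)
        if updated is not None:
            print(f"check (should be ~identity): {updated * (ainv + u * v.transpose())}")

    test3()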
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
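
# Sketch of the intended effect (assumes this file is the `models/beit/__init__.py` of
# transformers): with `_LazyModule` installed in `sys.modules`, the heavy torch/flax
# submodules listed above are only imported on first attribute access, e.g.
#
#     from transformers.models.beit import BeitConfig  # cheap: no torch/flax import yet
#     from transformers.models.beit import BeitModel   # now triggers the torch import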
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
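

# Illustrative usage (an added sketch): this packaged module backs
# `load_dataset("pandas", ...)` for pickled DataFrames. With a hypothetical file
# `frame.pkl` written by `DataFrame.to_pickle`:
#
#     import datasets
#     ds = datasets.load_dataset("pandas", data_files="frame.pkl", split="train")
#
# Each file is unpickled with `pd.read_pickle` and converted to one Arrow table above.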
| 300
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, __a=1000, ):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : Tuple = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : List[Any] = scope
_lowerCAmelCase : int = range_bbox
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# convert bbox to numpy since TF does not support item assignment
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : List[Any] = bbox[i, j, 3]
_lowerCAmelCase : str = bbox[i, j, 1]
_lowerCAmelCase : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : Dict = bbox[i, j, 2]
_lowerCAmelCase : int = bbox[i, j, 0]
_lowerCAmelCase : Optional[int] = t
_lowerCAmelCase : Any = tf.convert_to_tensor(__a)
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Dict = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : List[str] = LayoutLMConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFLayoutLMModel(config=__a)
_lowerCAmelCase : Any = model(__a, __a, attention_mask=__a, token_type_ids=__a)
_lowerCAmelCase : List[Any] = model(__a, __a, token_type_ids=__a)
_lowerCAmelCase : List[Any] = model(__a, __a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = TFLayoutLMForMaskedLM(config=__a)
_lowerCAmelCase : int = model(__a, __a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.num_labels
_lowerCAmelCase : Dict = TFLayoutLMForSequenceClassification(config=__a)
_lowerCAmelCase : Optional[int] = model(__a, __a, attention_mask=__a, token_type_ids=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : List[str] = TFLayoutLMForTokenClassification(config=__a)
_lowerCAmelCase : List[Any] = model(__a, __a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFLayoutLMForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = model(__a, __a, attention_mask=__a, token_type_ids=__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = 10
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFLayoutLMModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFLayoutLMModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@unittest.skip("Onnx compliancy broke with TF 2.10")
def snake_case__ ( self):
'''simple docstring'''
pass
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowerCAmelCase : str = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowerCAmelCase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowerCAmelCase : List[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : str = model(input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
# test the sequence output on [0, :3, :3]
_lowerCAmelCase : List[Any] = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]], )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __a, atol=1E-3))
# test the pooled output on [1, :3]
_lowerCAmelCase : Dict = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3], __a, atol=1E-3))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : int = model(
input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=tf.convert_to_tensor([1, 1]), )
# test whether we get a loss as a scalar
_lowerCAmelCase : Dict = outputs.loss
_lowerCAmelCase : str = (2,)
self.assertEqual(loss.shape, __a)
# test the shape of the logits
_lowerCAmelCase : int = outputs.logits
_lowerCAmelCase : int = (2, 2)
self.assertEqual(logits.shape, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : Tuple = model(
input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a)
# test the shape of the logits
_lowerCAmelCase : int = outputs.logits
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : str = model(input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
# test the shape of the logits
_lowerCAmelCase : List[Any] = tf.convert_to_tensor((2, 25))
self.assertEqual(outputs.start_logits.shape, __a)
self.assertEqual(outputs.end_logits.shape, __a)
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_snake_case = "\nHuman: <<task>>\n\nAssistant: "
_snake_case = "huggingface-tools/default-prompts"
_snake_case = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="run" ):
'''simple docstring'''
if prompt_or_repo_id is None:
_lowerCAmelCase : str = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCamelCase ) is not None:
return prompt_or_repo_id
_lowerCAmelCase : Dict = cached_file(
_lowerCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
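

# Illustrative usage (an added sketch; "MyAgent" is a hypothetical agent name):
#
#     template = download_prompt(None, agent_name="MyAgent", mode="run")  # Hub default
#     as_is = download_prompt("Answer: <<task>>", agent_name="MyAgent")   # contains
#         # whitespace, so it is treated as a literal prompt and returned verbatim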
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
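

# Illustrative sketch (names assume the usual transformers sub-configs; not executed
# here):
#
#     from transformers import BartConfig, DPRConfig
#     rag = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#     )
#
# `to_dict` round-trips both nested configs alongside the retrieval settings.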
| 300
| 1
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
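

# Minimal concrete command (an added sketch): `register_subcommand` receives the
# subparsers action created by the CLI entry point and wires a factory; `run` does
# the work once arguments are parsed. `EchoCommand` is illustrative only.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print a fixed message (example only)")
        echo_parser.set_defaults(func=lambda args: EchoCommand())

    def run(self):
        print("echo")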
| 300
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt = None, choices = None):
        '''simple docstring'''
        self.position = 0
        self.choices = choices if choices is not None else []
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
    def print_choice(self, index):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction, num_spaces = 1):
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        '''simple docstring'''
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice = 0):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
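

# Illustrative usage (an added sketch; requires an interactive terminal):
#
#     menu = BulletMenu("Which framework?", ["pytorch", "tensorflow", "jax"])
#     picked = menu.run(default_choice=0)  # returns the index of the selected choice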
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    '''simple docstring'''
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 1
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class UpperCAmelCase_ ( a):
def __init__( self, __a=None, __a=None, *__a, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
if config is None:
assert isinstance(self.model, __a), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
_lowerCAmelCase : Tuple = self.model.config
else:
_lowerCAmelCase : List[Any] = config
_lowerCAmelCase : Dict = data_args
_lowerCAmelCase : List[str] = self.config.tgt_vocab_size if isinstance(self.config, __a) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
" padding..")
if self.args.label_smoothing == 0:
_lowerCAmelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_lowerCAmelCase : str = label_smoothed_nll_loss
def snake_case__ ( self, __a):
'''simple docstring'''
if self.optimizer is None:
_lowerCAmelCase : Optional[Any] = ["bias", "LayerNorm.weight"]
_lowerCAmelCase : int = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
_lowerCAmelCase : Optional[int] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_lowerCAmelCase : List[Any] = Adafactor
_lowerCAmelCase : List[str] = {"scale_parameter": False, "relative_step": False}
else:
_lowerCAmelCase : str = AdamW
_lowerCAmelCase : int = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_lowerCAmelCase : Any = self.args.learning_rate
if self.sharded_ddp:
_lowerCAmelCase : Dict = OSS(
params=__a, optim=__a, **__a, )
else:
_lowerCAmelCase : Union[str, Any] = optimizer_cls(__a, **__a)
if self.lr_scheduler is None:
_lowerCAmelCase : Dict = self._get_lr_scheduler(__a)
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_lowerCAmelCase : List[Any] = schedule_func(self.optimizer)
elif self.args.lr_scheduler == "constant_w_warmup":
_lowerCAmelCase : List[Any] = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
else:
_lowerCAmelCase : Any = schedule_func(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=__a)
return scheduler
def snake_case__ ( self):
'''simple docstring'''
if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset)
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_lowerCAmelCase : List[Any] = model(**__a, use_cache=__a)[0]
_lowerCAmelCase : Tuple = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
else:
# compute usual loss via models
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model(**__a, labels=__a, use_cache=__a)[:2]
else:
# compute label smoothed loss
_lowerCAmelCase : str = model(**__a, use_cache=__a)[0]
_lowerCAmelCase : Any = torch.nn.functional.log_softmax(__a, dim=-1)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.loss_fn(__a, __a, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
return loss, logits
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = inputs.pop("labels")
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self._compute_loss(__a, __a, __a)
return loss
def snake_case__ ( self, __a, __a, __a, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : Any = self._prepare_inputs(__a)
_lowerCAmelCase : str = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_lowerCAmelCase : Any = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **__a, )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_lowerCAmelCase : Dict = self._pad_tensors_to_max_len(__a, gen_kwargs["max_length"])
_lowerCAmelCase : Union[str, Any] = inputs.pop("labels")
with torch.no_grad():
# compute loss on predict data
_lowerCAmelCase , _lowerCAmelCase : Any = self._compute_loss(__a, __a, __a)
_lowerCAmelCase : str = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_lowerCAmelCase : Tuple = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_lowerCAmelCase : Union[str, Any] = self._pad_tensors_to_max_len(__a, gen_kwargs["max_length"])
return (loss, logits, labels)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f" padded to `max_length`={max_length}")
_lowerCAmelCase : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
_lowerCAmelCase : int = tensor
return padded_tensor
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output-length indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
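# Minimal sketch (illustrative, not used by the collator): the
# flip/cumsum/flip idiom above turns a mask holding a single 1 at each
# sequence's last valid index into a full prefix attention mask.
def _prefix_mask_sketch():
    import torch

    mask = torch.tensor([[0, 0, 1, 0, 0]])  # 1 marks the last valid position
    prefix = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
    assert prefix.tolist() == [[True, True, True, False, False]]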
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
        _lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 1
|
import math
import qiskit
def A ( _lowerCamelCase = 1 , _lowerCamelCase = 1 , _lowerCamelCase = 1 ):
'''simple docstring'''
if (
isinstance(_lowerCamelCase , _lowerCamelCase )
or isinstance(_lowerCamelCase , _lowerCamelCase )
or isinstance(_lowerCamelCase , _lowerCamelCase )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(_lowerCamelCase ) != input_a)
or (math.floor(_lowerCamelCase ) != input_a)
or (math.floor(_lowerCamelCase ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
_lowerCAmelCase : List[Any] = qiskit.QuantumRegister(4 , "qr" )
_lowerCAmelCase : Dict = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
_lowerCAmelCase : Any = [input_a, input_a, carry_in]
_lowerCAmelCase : List[str] = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_lowerCamelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowerCamelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowerCamelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _lowerCamelCase ) # measure the last two qbits
_lowerCAmelCase : Tuple = qiskit.Aer.get_backend("aer_simulator" )
_lowerCAmelCase : Dict = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1_000 )
return job.result().get_counts(_lowerCamelCase )
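# Illustrative note (assumed qiskit bit ordering): the counts key reads the
# classical bits as cr[1]cr[0] = (carry_out, sum), so an input of 1 + 1 + 1
# would be reported as something like {"11": 1000} -- carry_out = 1, sum = 1.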
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def A ( _lowerCamelCase ):
'''simple docstring'''
def is_in_circle(_lowerCamelCase , _lowerCamelCase ) -> bool:
_lowerCAmelCase : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_lowerCAmelCase : Any = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_lowerCamelCase ) )
    # The ratio of the area of the circle to the area of the square is pi/4:
    # the unit circle has area pi * 1**2, the enclosing square (side 2) has area 4.
_lowerCAmelCase : Tuple = proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 , _lowerCamelCase = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(_lowerCamelCase , _lowerCamelCase ) ) for _ in range(_lowerCamelCase ) ) * (max_value - min_value)
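# Minimal usage sketch, assuming the estimator above is reachable under the
# name `area_under_curve_estimator` (as the calls below in this file assume).
# It integrates x**2 over [0, 1]; the exact value is 1/3, so a 10_000-sample
# estimate should land within roughly 0.05 of it.
def _mc_integration_sketch():
    estimate = area_under_curve_estimator(10_000, lambda x: x * x, 0.0, 1.0)
    assert abs(estimate - 1 / 3) < 0.05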
def A ( _lowerCamelCase , _lowerCamelCase = 0.0 , _lowerCamelCase = 1.0 ):
'''simple docstring'''
def identity_function(_lowerCamelCase ) -> float:
return x
_lowerCAmelCase : Union[str, Any] = area_under_curve_estimator(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("******************" )
def A ( _lowerCamelCase ):
'''simple docstring'''
def function_to_integrate(_lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
_lowerCAmelCase : List[Any] = area_under_curve_estimator(
_lowerCamelCase , _lowerCamelCase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
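        # Illustrative only: the sigmas above follow a geometric schedule
        # sigma(t) = sigma_min * (sigma_max / sigma_min) ** t (up to how the
        # exponent is scaled); e.g. sigma_min=0.01, sigma_max=100.0 gives
        # sigma(0.5) = 0.01 * 10_000 ** 0.5 = 1.0.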
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
| 300
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'sew-d'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a=2, __a=512, __a=256, __a=True, __a=True, __a=("p2c", "c2p"), __a="layer_norm", __a="gelu_python", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.1, __a=0.02, __a=1E-7, __a=1E-5, __a="group", __a="gelu", __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), __a=False, __a=128, __a=16, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a="mean", __a=False, __a=False, __a=256, __a=0, __a=1, __a=2, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : str = feat_extract_norm
_lowerCAmelCase : List[str] = feat_extract_activation
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : Tuple = conv_bias
_lowerCAmelCase : Tuple = num_conv_pos_embeddings
_lowerCAmelCase : Union[str, Any] = num_conv_pos_embedding_groups
_lowerCAmelCase : Optional[int] = len(self.conv_dim)
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = squeeze_factor
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : int = position_buckets
_lowerCAmelCase : str = share_att_key
_lowerCAmelCase : Optional[int] = relative_attention
_lowerCAmelCase : List[Any] = norm_rel_ebd
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = hidden_dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : Any = activation_dropout
_lowerCAmelCase : Optional[int] = feat_proj_dropout
_lowerCAmelCase : List[str] = final_dropout
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = feature_layer_norm_eps
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : int = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : Dict = mask_time_prob
_lowerCAmelCase : Tuple = mask_time_length
_lowerCAmelCase : Any = mask_time_min_masks
_lowerCAmelCase : Union[str, Any] = mask_feature_prob
_lowerCAmelCase : Tuple = mask_feature_length
_lowerCAmelCase : Tuple = mask_feature_min_masks
# ctc loss
_lowerCAmelCase : Union[str, Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# sequence classification
_lowerCAmelCase : List[str] = use_weighted_layer_sum
_lowerCAmelCase : Dict = classifier_proj_size
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
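        # Illustrative only: with the default conv_stride of
        # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this reduces to
        # 5 * 2**6 = 320, i.e. one feature frame per 320 input samples.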
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
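# Illustrative usage of the strength check above (a sketch; `A` is the obfuscated
# name this file uses for the validator):
# >>> A("Aa1!Aa1!")   # mixes uppercase, lowercase, digits and punctuation
# True
# >>> A("abcdefgh")   # a single character class fails the check
# False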
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 300
| 1
|
import cmath
import math
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = math.radians(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = math.radians(_lowerCamelCase )
# Convert voltage and current to rectangular form
_lowerCAmelCase : Tuple = cmath.rect(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = cmath.rect(_lowerCamelCase , _lowerCamelCase )
# Calculate apparent power
return voltage_rect * current_rect
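# Illustrative usage (a sketch, assuming the angles are given in degrees):
# >>> A(100, 5, 0, 0)    # 100 V and 5 A in phase
# (500+0j)
# >>> A(100, 5, 90, 0)   # voltage leading by 90 degrees -> purely reactive power,
#                        # approximately 0 + 500j up to floating-point error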
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
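# Note (illustrative): with the `_LazyModule` pattern above, heavy submodules are
# imported only on first attribute access, e.g. importing `ConvNextConfig` from this
# package resolves through `configuration_convnext` without touching the torch or TF
# modeling files.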
| 300
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
_snake_case = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
_snake_case = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
_lowerCAmelCase : Any = json.loads(f.read() )
_lowerCAmelCase : List[str] = collections.OrderedDict()
_lowerCAmelCase : Tuple = collections.OrderedDict()
_lowerCAmelCase : List[str] = collections.OrderedDict()
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
_lowerCAmelCase : List[Any] = f.readlines()
_lowerCAmelCase : Optional[int] = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = b
_lowerCAmelCase : str = idx
for wd in b:
_lowerCAmelCase : Dict = idx
return vocab, raw_vocab, ids_to_tokens, emoji
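# Illustrative note on the vocab format parsed above (inferred from the parsing
# logic, not from official docs): each line of vocab.txt is either a single token or
# a comma-separated group of surface forms sharing one token id, e.g. a hypothetical
# line "おはよう,おはよー" maps both spellings to the same id; ids_to_tokens keeps the
# whole group and the first entry is used when converting ids back to tokens.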
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self, __a, __a, __a="<|endoftext|>", __a="<|endoftext|>", __a="<|startoftext|>", __a="<|endoftext|>", __a=False, **__a, ):
'''simple docstring'''
super().__init__(
unk_token=__a, pad_token=__a, bos_token=__a, eos_token=__a, do_clean_text=__a, **__a, )
if not os.path.isfile(__a):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(__a):
raise ValueError(
f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
_lowerCAmelCase : Dict = do_clean_text
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_vocab_and_emoji(__a, __a)
_lowerCAmelCase : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.raw_vocab)
def snake_case__ ( self):
'''simple docstring'''
return dict(self.raw_vocab, **self.added_tokens_encoder)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.subword_tokenizer.tokenize(__a, clean=self.do_clean_text)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.vocab.get(__a, self.vocab.get(self.unk_token))
def snake_case__ ( self, __a):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = "".join(__a).strip()
return out_string
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a, add_special_tokens=__a) + [self.eos_token_id])
if len(__a) > self.model_max_length:
_lowerCAmelCase : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
if os.path.isdir(__a):
_lowerCAmelCase : Any = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : List[Any] = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
_lowerCAmelCase : Optional[Any] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
_lowerCAmelCase : Any = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__a, "w", encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!")
_lowerCAmelCase : Optional[Any] = token_index
writer.write(",".join(__a) + "\n")
index += 1
with open(__a, "w", encoding="utf-8") as writer:
json.dump(self.emoji, __a)
return vocab_file, emoji_file
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = vocab # same as swe
_lowerCAmelCase : int = ids_to_tokens # same as bpe
_lowerCAmelCase : Union[str, Any] = emoji
_lowerCAmelCase : int = np.max([len(__a) for w in self.vocab.keys()])
_lowerCAmelCase : Dict = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
_lowerCAmelCase : Optional[Any] = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
_lowerCAmelCase : List[Any] = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
_lowerCAmelCase : List[str] = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
_lowerCAmelCase : List[Any] = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
_lowerCAmelCase : List[Any] = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
_lowerCAmelCase : List[str] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
_lowerCAmelCase : List[Any] = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
_lowerCAmelCase : Tuple = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self):
'''simple docstring'''
return len(self.ids_to_tokens)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.content_repattera.sub("<URL>", __a)
_lowerCAmelCase : int = self.content_repattera.sub("<EMAIL>", __a)
_lowerCAmelCase : Tuple = self.content_repattera.sub("<TEL>", __a)
_lowerCAmelCase : Optional[int] = self.content_repattera.sub("<DATE>", __a)
_lowerCAmelCase : Dict = self.content_repattera.sub("<DATE>", __a)
_lowerCAmelCase : str = self.content_repattera.sub("<PRICE>", __a)
_lowerCAmelCase : str = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
_lowerCAmelCase : Tuple = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
return content
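# Illustrative effect of the substitutions above (hypothetical input; the compiled
# patterns are intended to tag URLs, emails, phone numbers, dates and prices):
# "see https://example.com or mail a@b.jp" -> "see <URL> or mail <EMAIL>"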
def snake_case__ ( self, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = text.replace(" ", "<SP>")
_lowerCAmelCase : Tuple = text.replace(" ", "<SP>")
_lowerCAmelCase : Tuple = text.replace("\r\n", "<BR>")
_lowerCAmelCase : Optional[int] = text.replace("\n", "<BR>")
_lowerCAmelCase : Optional[int] = text.replace("\r", "<BR>")
_lowerCAmelCase : List[Any] = text.replace("\t", "<TAB>")
_lowerCAmelCase : Tuple = text.replace("—", "ー")
_lowerCAmelCase : Dict = text.replace("−", "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
_lowerCAmelCase : Union[str, Any] = text.replace(__a, __a)
if clean:
_lowerCAmelCase : Optional[Any] = self.clean_text(__a)
def check_simbol(x):
e = x.encode()
if len(x) == 1 and len(e) == 2:
c = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(x):
e = x.encode()
if len(x) == 1 and len(e) == 3:
c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
_lowerCAmelCase : int = 0
_lowerCAmelCase : List[str] = []
while pos < len(__a):
_lowerCAmelCase : Dict = min(len(__a), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
_lowerCAmelCase : Union[str, Any] = [] # (token_id, token, pos)
for e in range(__a, __a, -1):
_lowerCAmelCase : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__a) > 2:
_lowerCAmelCase : List[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(__a) > 0:
# the smallest token_id is adopted
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = sorted(__a, key=lambda x: x[0])[0]
result.append(__a)
_lowerCAmelCase : Tuple = e
else:
_lowerCAmelCase : Tuple = pos + 1
_lowerCAmelCase : Optional[int] = text[pos:end]
if check_simbol(__a):
result.append("<KIGOU>")
elif checkuae(__a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
_lowerCAmelCase : Any = end
return result
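# Note (illustrative): the loop above is a greedy longest-match tokenizer. For each
# position it scans candidate substrings from longest to shortest, takes special
# "<...>" tokens outright, breaks ties by the smallest token id, and falls back to
# "<KIGOU>", "<U2000U2BFF>" or per-byte "<|byte%d|>" tokens for unknown characters.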
def snake_case__ ( self, __a, __a="\n"):
'''simple docstring'''
_lowerCAmelCase : Any = []
_lowerCAmelCase : int = []
_lowerCAmelCase : str = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(__a) > 0:
words.append(bytearray(__a).decode("utf-8", errors="replace"))
_lowerCAmelCase : Tuple = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(__a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(__a)
if len(__a) > 0:
words.append(bytearray(__a).decode("utf-8", errors="replace"))
_lowerCAmelCase : Optional[int] = "".join(__a)
return text
| 300
|
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
# used by the Casimir force function below
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
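# For reference, the closed-form relation solved above (a sketch of the math):
#     F = (hbar * c * pi**2 * A) / (240 * d**4)
# e.g. for A = 4 m^2 and d = 0.03 m the force comes out to roughly
# (1.054e-34 * 3e8 * pi**2 * 4) / (240 * 0.03**4) ≈ 6.4e-21 N.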
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 1
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
def to_tf_var_name(_lowerCamelCase ):
for patt, repl in iter(_lowerCamelCase ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
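# Illustrative invocation (the script name and paths are placeholders; note the
# script relies on TensorFlow 1.x APIs such as tf.Session and tf.get_variable):
#   python convert_bert_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf_checkpoints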
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
# these two operations make sure that all values
# before the output lengths indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
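# Note (illustrative): `_compute_mask_indices` draws SpecAugment-style time-mask
# spans; with mask probability p and span length M, roughly p * seq_len / M span
# starts are sampled per example, with at least `min_masks=2` spans here.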
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
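# Illustrative decay schedule implied above (using the defaults max=2.0, min=0.5,
# decay=0.999995): the temperature after n updates is max(2.0 * 0.999995**n, 0.5),
# i.e. roughly 1.81 after 20k updates, and clipped at 0.5 from about step 277k on.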
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(batch ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(batch ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300
| 1
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_lowerCAmelCase : Dict = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Dict = in_proj_weight[
: encoder_config.hidden_size, :
]
_lowerCAmelCase : Any = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_lowerCAmelCase : str = in_proj_weight[
-encoder_config.hidden_size :, :
]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = dct.pop(_lowerCamelCase )
_lowerCAmelCase : List[Any] = val
def A ( _lowerCamelCase ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
_lowerCAmelCase : List[str] = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCAmelCase : Union[str, Any] = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
_lowerCAmelCase : str = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCamelCase )
_lowerCAmelCase : List[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_lowerCAmelCase : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
_lowerCAmelCase : Dict = 1_024
_lowerCAmelCase : Optional[int] = 4_096
_lowerCAmelCase : Union[str, Any] = 24
_lowerCAmelCase : int = 16
_lowerCAmelCase : int = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Any = "relu"
_lowerCAmelCase : int = 1_024
_lowerCAmelCase : Any = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : List[Any] = False
# load HuggingFace model
_lowerCAmelCase : List[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase )
_lowerCAmelCase : str = TrOCRForCausalLM(_lowerCamelCase )
_lowerCAmelCase : Any = VisionEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase )
model.eval()
# load state_dict of original model, rename some keys
_lowerCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" , check_hash=_lowerCamelCase )["model"]
_lowerCAmelCase : Any = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
_lowerCAmelCase : str = val
else:
_lowerCAmelCase : Optional[int] = val
# load state dict
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image
_lowerCAmelCase : List[str] = ViTImageProcessor(size=encoder_config.image_size )
_lowerCAmelCase : List[str] = RobertaTokenizer.from_pretrained("roberta-large" )
_lowerCAmelCase : int = TrOCRProcessor(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : str = processor(images=prepare_img(_lowerCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
_lowerCAmelCase : Any = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_lowerCAmelCase : Optional[int] = model(pixel_values=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
_lowerCAmelCase : List[Any] = outputs.logits
_lowerCAmelCase : Tuple = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
_lowerCAmelCase : Optional[Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowerCAmelCase : int = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
_lowerCAmelCase : List[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
_lowerCAmelCase : str = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
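# Illustrative invocation (the script name and output folder are placeholders; the
# checkpoint URL matches the argparse default above):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten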
| 300
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.h2.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.h2.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=2, __a=3, __a=4, __a=2, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=36, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=6, __a=6, __a=3, __a=4, __a=None, __a=1000, ):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : Tuple = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : int = type_vocab_size
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = coordinate_size
_lowerCAmelCase : Union[str, Any] = shape_size
_lowerCAmelCase : int = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : Union[str, Any] = scope
_lowerCAmelCase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : Any = text_seq_length
_lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : int = self.text_seq_length + self.image_seq_length
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
_lowerCAmelCase : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : List[Any] = bbox[i, j, 3]
_lowerCAmelCase : Union[str, Any] = bbox[i, j, 1]
_lowerCAmelCase : List[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : Optional[Any] = bbox[i, j, 2]
_lowerCAmelCase : List[str] = bbox[i, j, 0]
_lowerCAmelCase : List[str] = tmp_coordinate
_lowerCAmelCase : List[Any] = tf.constant(__a)
_lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length])
_lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
_lowerCAmelCase : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = TFLayoutLMvaModel(config=__a)
# text + image
_lowerCAmelCase : Dict = model(__a, pixel_values=__a, training=__a)
_lowerCAmelCase : Union[str, Any] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, training=__a, )
_lowerCAmelCase : Optional[Any] = model(__a, bbox=__a, pixel_values=__a, training=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# text only
_lowerCAmelCase : Dict = model(__a, training=__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
_lowerCAmelCase : Tuple = model({"pixel_values": pixel_values}, training=__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.num_labels
_lowerCAmelCase : Dict = TFLayoutLMvaForSequenceClassification(config=__a)
_lowerCAmelCase : Optional[int] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.num_labels
_lowerCAmelCase : List[Any] = TFLayoutLMvaForTokenClassification(config=__a)
_lowerCAmelCase : Optional[int] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : int = TFLayoutLMvaForQuestionAnswering(config=__a)
_lowerCAmelCase : Optional[int] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, training=__a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : str = config_and_inputs
_lowerCAmelCase : str = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
return True
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : int = copy.deepcopy(__a)
if model_class in get_values(__a):
_lowerCAmelCase : Any = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : int = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(__a):
_lowerCAmelCase : Union[str, Any] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
_lowerCAmelCase : Optional[int] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(__a):
_lowerCAmelCase : List[str] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(__a):
_lowerCAmelCase : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
return inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = TFLayoutLMvaModelTester(self)
_lowerCAmelCase : Optional[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(__a)
if getattr(__a, "hf_compute_loss", __a):
# The number of elements in the loss should be the same as the number of elements in the label
_lowerCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=__a)[0]
]
_lowerCAmelCase : Optional[int] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_lowerCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Optional[int] = prepared_for_class.pop("input_ids")
_lowerCAmelCase : int = model(__a, **__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss when we mask some positions
_lowerCAmelCase : List[str] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Optional[int] = prepared_for_class.pop("input_ids")
if "labels" in prepared_for_class:
_lowerCAmelCase : str = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
_lowerCAmelCase : Any = -100
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(__a)
_lowerCAmelCase : Tuple = model(__a, **__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
# Test that model correctly compute the loss with a dict
_lowerCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Dict = model(__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss with a tuple
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
# Get keys that were added with the _prepare_for_class function
_lowerCAmelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call).parameters
_lowerCAmelCase : Union[str, Any] = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
_lowerCAmelCase : Optional[Any] = {0: "input_ids"}
for label_key in label_keys:
_lowerCAmelCase : Dict = signature_names.index(__a)
_lowerCAmelCase : Tuple = label_key
_lowerCAmelCase : Tuple = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
_lowerCAmelCase : Optional[int] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
_lowerCAmelCase : Any = prepared_for_class[value]
_lowerCAmelCase : Union[str, Any] = tuple(__a)
# Send to model
_lowerCAmelCase : List[str] = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def snake_case__ ( self):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Any = type
self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__a, __a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__a, __a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__a, __a, __a, __a, __a, __a, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFLayoutLMvaModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a) if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
_lowerCAmelCase : List[str] = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : List[Any] = image_processor(images=__a, return_tensors="tf").pixel_values
_lowerCAmelCase : int = tf.constant([[1, 2]])
_lowerCAmelCase : List[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
# forward pass
_lowerCAmelCase : Dict = model(input_ids=__a, bbox=__a, pixel_values=__a, training=__a)
# verify the logits
_lowerCAmelCase : Tuple = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape, __a)
_lowerCAmelCase : List[str] = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]])
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __a, atol=1E-4))
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
def to_tf_var_name(_lowerCamelCase ):
for patt, repl in iter(_lowerCamelCase ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
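# Illustrative invocation of the converter above (the script file name and the
# paths are hypothetical; the flags come straight from the argparse setup in main):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint
#
# which writes ./tf_checkpoint/bert_base_uncased.ckpt via tf.train.Saver.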
| 300
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
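# Minimal sketch of what the lazy module enables: the attribute access below is
# what actually triggers the real import (module path assumed to be the usual
# transformers location for this file):
#
#   from transformers.models.timm_backbone import TimmBackboneConfig
#   config = TimmBackboneConfig()  # resolved on first access via _LazyModule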
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: __a[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
# union by rank -- the renaming pass collapsed the two roots to one name;
# they are distinguished here as roota / rootb so the comparisons are meaningful
if roota == rootb:
return roota
if self.rank[roota] > self.rank[rootb]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[rootb]:
_lowerCAmelCase : List[Any] = rootb
return rootb
if self.rank[roota] == self.rank[rootb]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
# the renaming pass collapsed both component roots to `seta`; they are
# distinguished here as seta / setb so the cheapest-edge bookkeeping is
# tracked per component
if seta != setb:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
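# Minimal usage sketch for the MST routine above. The de-obfuscated call names
# are assumptions (in the original source the builder is Graph.build and the
# static method is Graph.UnionFind.boruvka_mst):
#
#   g = Graph.build(vertices=[1, 2, 3, 4],
#                   edges=[(1, 2, 5), (2, 3, 4), (3, 4, 3), (1, 4, 6)])
#   mst = Graph.UnionFind.boruvka_mst(g)
#   print(mst)  # the minimum spanning tree, printed in adjacency form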
| 300
| 1
|
from __future__ import annotations
_snake_case = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_snake_case = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = len(_lowerCamelCase )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : float = -1
for j in range(i + 1 , _lowerCamelCase ):
if arr[i] < arr[j]:
_lowerCAmelCase : Optional[int] = arr[j]
break
result.append(_lowerCamelCase )
return result
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for i, outer in enumerate(_lowerCamelCase ):
_lowerCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_lowerCAmelCase : str = inner
break
result.append(_lowerCamelCase )
return result
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = len(_lowerCamelCase )
_lowerCAmelCase : list[float] = []
_lowerCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(_lowerCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_lowerCAmelCase : List[Any] = stack[-1]
stack.append(arr[index] )
return result
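# The stack-based variant above runs in O(n): scanning from the right, each
# element is pushed once and popped at most once, versus the O(n^2) nested
# loops in the two functions before it.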
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_snake_case = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 300
|
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
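# The expression implements v_rms = sqrt(3 * R * T / M) with R in J/(mol*K).
# Unit caveat: the result is in m/s only when molar_mass is in kg/mol (about
# 0.028 for N2); passing 28 treats it as g/mol and the value printed as "m/s"
# below is then smaller than the true speed by a factor of sqrt(1000).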
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 28
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 1
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
_lowerCAmelCase : List[Any] = (
"Wrong input data's dimensions... "
F"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(_lowerCamelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_lowerCAmelCase : Any = (
"Wrong input data's shape... "
F"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(_lowerCamelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_lowerCAmelCase : Dict = (
"Input data have different datatype... "
F"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(_lowerCamelCase )
_lowerCAmelCase : List[str] = []
for value in value_array:
_lowerCAmelCase : int = euclidean(_lowerCamelCase , dataset[0] )
_lowerCAmelCase : Optional[int] = dataset[0].tolist()
for dataset_value in dataset[1:]:
_lowerCAmelCase : Optional[Any] = euclidean(_lowerCamelCase , _lowerCamelCase )
if dist > temp_dist:
_lowerCAmelCase : str = temp_dist
_lowerCAmelCase : Optional[Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return np.dot(_lowerCamelCase , _lowerCamelCase ) / (norm(_lowerCamelCase ) * norm(_lowerCamelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
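# Minimal usage sketch. All three helpers above were renamed to `A` by the
# obfuscation pass; the names below assume the originals (similarity_search
# and cosine_similarity):
#
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value_array = np.array([[0, 1]])
#   similarity_search(dataset, value_array)                 # -> [[[0, 0], 1.0]]
#   cosine_similarity(np.array([1, 2]), np.array([2, 4]))   # -> 1.0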
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
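# With the default strides above, the property returns the product
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. the raw-audio-to-feature-frame
# downsampling ratio (named inputs_to_logits_ratio in the upstream
# transformers config). A minimal check, assuming the upstream class name:
#
#   config = Wav2Vec2Config()
#   assert functools.reduce(operator.mul, config.conv_stride, 1) == 320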
| 300
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
_snake_case = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = BertTokenizer
def __init__( self, __a=None, __a=None, __a=True, __a="[UNK]", __a="[SEP]", __a="[PAD]", __a="[CLS]", __a="[MASK]", __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(
__a, tokenizer_file=__a, do_lower_case=__a, unk_token=__a, sep_token=__a, pad_token=__a, cls_token=__a, mask_token=__a, tokenize_chinese_chars=__a, strip_accents=__a, **__a, )
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase", __a) != do_lower_case
or normalizer_state.get("strip_accents", __a) != strip_accents
or normalizer_state.get("handle_chinese_chars", __a) != tokenize_chinese_chars
):
_lowerCAmelCase : int = getattr(__a, normalizer_state.pop("type"))
_lowerCAmelCase : Optional[Any] = do_lower_case
_lowerCAmelCase : Dict = strip_accents
_lowerCAmelCase : Any = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**__a)
_lowerCAmelCase : Any = do_lower_case
def snake_case__ ( self, __a, __a=None):
'''simple docstring'''
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Dict = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(__a, name=__a)
return tuple(__a)
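# The class mirrors transformers' BertTokenizerFast; a typical call against the
# real upstream API (the token ids shown are indicative, not guaranteed):
#
#   from transformers import BertTokenizerFast
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tok("hello world")["input_ids"]   # [CLS] hello world [SEP] -> e.g. [101, 7592, 2088, 102]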
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
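# Minimal usage sketch (the original accelerate class is BulletMenu and the
# entry point is run; both names were stripped by the renaming pass):
#
#   menu = BulletMenu("Which device?", ["cpu", "cuda", "mps"])
#   index = menu.run(default_choice=0)   # blocks for arrow/number-key input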
| 300
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( a):
@staticmethod
@abstractmethod
def snake_case__ ( __a):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
'''simple docstring'''
raise NotImplementedError()
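# A minimal concrete subclass sketch (the echo command and its wiring are
# illustrative; in upstream transformers the ABC is BaseTransformersCLICommand):
#
#   class EchoCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           cmd = parser.add_parser("echo")
#           cmd.add_argument("text")
#           cmd.set_defaults(func=lambda args: EchoCommand(args.text))
#
#       def __init__(self, text):
#           self.text = text
#
#       def run(self):
#           print(self.text)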
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 1
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def A ( _lowerCamelCase , _lowerCamelCase="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) as f:
_lowerCAmelCase : int = json.load(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Union[str, Any] = []
for key, info in class_info.items():
_lowerCAmelCase : Union[str, Any] = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(_lowerCamelCase ) )
_lowerCAmelCase : Dict = thing_ids
_lowerCAmelCase : str = class_names
return metadata
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=7, __a=3, __a=30, __a=400, __a=None, __a=True, __a=True, __a=[0.5, 0.5, 0.5], __a=[0.5, 0.5, 0.5], __a=10, __a=False, __a=255, __a="shi-labs/oneformer_demo", __a="ade20k_panoptic.json", __a=10, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : List[str] = min_resolution
_lowerCAmelCase : Optional[Any] = max_resolution
_lowerCAmelCase : List[Any] = do_resize
_lowerCAmelCase : int = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
_lowerCAmelCase : List[str] = do_normalize
_lowerCAmelCase : Optional[Any] = image_mean
_lowerCAmelCase : List[Any] = image_std
_lowerCAmelCase : Optional[int] = class_info_file
_lowerCAmelCase : Any = prepare_metadata(__a, __a)
_lowerCAmelCase : Optional[int] = num_text
_lowerCAmelCase : int = repo_path
# for the post_process_functions
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Tuple = 10
_lowerCAmelCase : Tuple = 10
_lowerCAmelCase : str = 3
_lowerCAmelCase : int = 4
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : Tuple = do_reduce_labels
_lowerCAmelCase : Optional[int] = ignore_index
def snake_case__ ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def snake_case__ ( self, __a, __a=False):
'''simple docstring'''
if not batched:
_lowerCAmelCase : Optional[int] = image_inputs[0]
if isinstance(__a, Image.Image):
_lowerCAmelCase , _lowerCAmelCase : Tuple = image.size
else:
_lowerCAmelCase , _lowerCAmelCase : Any = image.shape[1], image.shape[2]
if w < h:
_lowerCAmelCase : str = int(self.size["shortest_edge"] * h / w)
_lowerCAmelCase : int = self.size["shortest_edge"]
elif w > h:
_lowerCAmelCase : str = self.size["shortest_edge"]
_lowerCAmelCase : int = int(self.size["shortest_edge"] * w / h)
else:
_lowerCAmelCase : Tuple = self.size["shortest_edge"]
_lowerCAmelCase : Tuple = self.size["shortest_edge"]
else:
_lowerCAmelCase : Optional[Any] = []
for image in image_inputs:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
_lowerCAmelCase : List[str] = max(__a, key=lambda __a: __a[0])[0]
_lowerCAmelCase : List[Any] = max(__a, key=lambda __a: __a[1])[1]
return expected_height, expected_width
def snake_case__ ( self):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)), )
@require_torch
@require_vision
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowerCamelCase__ = image_processing_class
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = OneFormerImageProcessorTester(self)
@property
def snake_case__ ( self):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a, "image_mean"))
self.assertTrue(hasattr(__a, "image_std"))
self.assertTrue(hasattr(__a, "do_normalize"))
self.assertTrue(hasattr(__a, "do_resize"))
self.assertTrue(hasattr(__a, "size"))
self.assertTrue(hasattr(__a, "ignore_index"))
self.assertTrue(hasattr(__a, "class_info_file"))
self.assertTrue(hasattr(__a, "num_text"))
self.assertTrue(hasattr(__a, "repo_path"))
self.assertTrue(hasattr(__a, "metadata"))
self.assertTrue(hasattr(__a, "do_reduce_labels"))
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processing_tester, equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a, Image.Image)
# Test not batched input
_lowerCAmelCase : str = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.image_processing_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.image_processing_tester.get_expected_values(__a, batched=__a)
_lowerCAmelCase : Tuple = image_processor(
__a, ["semantic"] * len(__a), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processing_tester, equal_resolution=__a, numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a, np.ndarray)
# Test not batched input
_lowerCAmelCase : List[str] = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : Any = self.image_processing_tester.get_expected_values(__a, batched=__a)
_lowerCAmelCase : int = image_processor(
__a, ["semantic"] * len(__a), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCAmelCase : str = prepare_image_inputs(self.image_processing_tester, equal_resolution=__a, torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a, torch.Tensor)
# Test not batched input
_lowerCAmelCase : int = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Dict = self.image_processing_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(__a, batched=__a)
_lowerCAmelCase : Tuple = image_processor(
__a, ["semantic"] * len(__a), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self, __a=False, __a=False, __a="np"):
'''simple docstring'''
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict)
# prepare image and target
_lowerCAmelCase : List[str] = self.image_processing_tester.num_labels
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : int = prepare_image_inputs(self.image_processing_tester, equal_resolution=__a)
if with_segmentation_maps:
_lowerCAmelCase : Optional[int] = num_labels
if is_instance_map:
_lowerCAmelCase : Tuple = list(range(__a)) * 2
_lowerCAmelCase : Tuple = dict(enumerate(__a))
_lowerCAmelCase : Union[str, Any] = [
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
_lowerCAmelCase : str = [Image.fromarray(__a) for annotation in annotations]
_lowerCAmelCase : List[Any] = image_processor(
__a, ["semantic"] * len(__a), __a, return_tensors="pt", instance_id_to_semantic_id=__a, pad_and_return_pixel_mask=__a, )
return inputs
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
def common(__a=False, __a=None):
_lowerCAmelCase : Optional[int] = self.comm_get_image_processor_inputs(
with_segmentation_maps=__a, is_instance_map=__a, segmentation_type=__a)
_lowerCAmelCase : Optional[int] = inputs["mask_labels"]
_lowerCAmelCase : Optional[int] = inputs["class_labels"]
_lowerCAmelCase : Union[str, Any] = inputs["pixel_values"]
_lowerCAmelCase : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(__a, __a, __a):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
self.assertEqual(len(__a), self.image_processing_tester.num_text)
common()
common(is_instance_map=__a)
common(is_instance_map=__a, segmentation_type="pil")
common(is_instance_map=__a, segmentation_type="pil")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = np.zeros((20, 50))
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : Dict = binary_mask_to_rle(__a)
self.assertEqual(len(__a), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
_lowerCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCAmelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(__a)
self.assertEqual(len(__a), self.image_processing_tester.batch_size)
self.assertEqual(
segmentation[0].shape, (
self.image_processing_tester.height,
self.image_processing_tester.width,
), )
_lowerCAmelCase : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
_lowerCAmelCase : Optional[int] = image_processor.post_process_semantic_segmentation(__a, target_sizes=__a)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
_lowerCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCAmelCase : List[str] = image_processor.post_process_instance_segmentation(__a, threshold=0)
self.assertTrue(len(__a) == self.image_processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), __a)
self.assertEqual(
el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
_lowerCAmelCase : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCAmelCase : Union[str, Any] = image_processor.post_process_panoptic_segmentation(__a, threshold=0)
self.assertTrue(len(__a) == self.image_processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), __a)
self.assertEqual(
el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 300
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a = 0):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = row, column
_lowerCAmelCase : str = [[default_value for c in range(__a)] for r in range(__a)]
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
_lowerCAmelCase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowerCAmelCase : List[str] = max(__a, len(str(__a)))
_lowerCAmelCase : Union[str, Any] = f"%{max_element_length}s"
# Make string and return
def single_line(__a) -> str:
nonlocal string_format_identifier
_lowerCAmelCase : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(__a) for row_vector in self.array)
return s
def __repr__( self):
'''simple docstring'''
return str(self)
def snake_case__ ( self, __a):
'''simple docstring'''
if not (isinstance(__a, (list, tuple)) and len(__a) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
return self.array[loc[0]][loc[1]]
def __setitem__( self, __a, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
_lowerCAmelCase : Union[str, Any] = value
def __add__( self, __a):
'''simple docstring'''
assert isinstance(__a, __a)
assert self.row == another.row and self.column == another.column
# Add
_lowerCAmelCase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c] + another[r, c]
return result
def __neg__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : str = -self[r, c]
return result
def __sub__( self, __a):
'''simple docstring'''
return self + (-another)
def __mul__( self, __a):
'''simple docstring'''
if isinstance(__a, (int, float)): # Scalar multiplication
_lowerCAmelCase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Optional[Any] = self[r, c] * another
return result
elif isinstance(__a, __a): # Matrix multiplication
assert self.column == another.row
_lowerCAmelCase : List[str] = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowerCAmelCase : Optional[Any] = f"Unsupported type given for another ({type(__a)})"
raise TypeError(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c]
return result
def snake_case__ ( self, __a, __a):
'''simple docstring'''
assert isinstance(__a, __a) and isinstance(__a, __a)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowerCAmelCase : int = v.transpose()
_lowerCAmelCase : str = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
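# Identity applied above: with M = A^(-1) stored in `self`,
#   (A + u v^T)^(-1) = M - (M u)(v^T M) / (1 + v^T M u)
# `numerator_factor` is the scalar 1 + v^T M u; a zero value means A + u v^T
# is singular, hence the early `return None`.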
# Testing
if __name__ == "__main__":
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowerCAmelCase : Union[str, Any] = 1
print(F"a^(-1) is {ainv}" )
# u, v
_lowerCAmelCase : Any = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 1, 2, -3
_lowerCAmelCase : List[Any] = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}" )
def A ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 300
| 1
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCAmelCase_ :
def __init__( self, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : List[Any] = 13
_lowerCAmelCase : List[str] = 7
_lowerCAmelCase : Any = True
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : str = 99
_lowerCAmelCase : Union[str, Any] = 32
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : Tuple = 4
_lowerCAmelCase : Dict = 37
_lowerCAmelCase : List[str] = "gelu"
_lowerCAmelCase : int = 0.1
_lowerCAmelCase : Tuple = 0.1
_lowerCAmelCase : Optional[Any] = 512
_lowerCAmelCase : Dict = 16
_lowerCAmelCase : Tuple = 2
_lowerCAmelCase : Tuple = 0.02
_lowerCAmelCase : Optional[Any] = 3
_lowerCAmelCase : List[Any] = 4
_lowerCAmelCase : str = None
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : int = None
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = self.prepare_config_and_inputs()
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFEsmModel(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask}
_lowerCAmelCase : Optional[Any] = model(__a)
_lowerCAmelCase : Any = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[int] = TFEsmModel(config=__a)
_lowerCAmelCase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
_lowerCAmelCase : Optional[int] = model(__a)
_lowerCAmelCase : List[str] = [input_ids, input_mask]
_lowerCAmelCase : Any = model(__a, encoder_hidden_states=__a)
# Also check the case where encoder outputs are not passed
_lowerCAmelCase : List[str] = model(__a, attention_mask=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFEsmForMaskedLM(config=__a)
_lowerCAmelCase : Any = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Any = TFEsmForTokenClassification(config=__a)
_lowerCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
_lowerCAmelCase : Tuple = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFEsmModelTester(self)
_lowerCAmelCase : Tuple = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFEsmModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@unittest.skip("Protein models do not support embedding resizing.")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip("Protein models do not support embedding resizing.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(__a)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_lowerCAmelCase : Union[str, Any] = model.get_bias()
assert isinstance(__a, __a)
for k, v in name.items():
assert isinstance(__a, tf.Variable)
else:
_lowerCAmelCase : str = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Tuple = model.get_bias()
assert name is None
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
_lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Optional[Any] = model(__a)[0]
_lowerCAmelCase : int = [1, 6, 33]
self.assertEqual(list(output.numpy().shape), __a)
# compare the actual values for a slice.
_lowerCAmelCase : Optional[Any] = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
_lowerCAmelCase : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
_lowerCAmelCase : Tuple = model(__a)[0]
# compare the actual values for a slice.
_lowerCAmelCase : Optional[Any] = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(files, str):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(files, str):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
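# Hedged usage sketch (the file name below is illustrative, not from the source):
# this builder backs `datasets.load_dataset("pandas", ...)`, so a pickled
# DataFrame can be loaded without writing any custom loading code:
#   import pandas as pd
#   pd.DataFrame({"a": [1, 2]}).to_pickle("train.pkl")
#   ds = datasets.load_dataset("pandas", data_files={"train": "train.pkl"})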
| 300
| 1
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_snake_case = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class UpperCAmelCase_ ( tr.AbstractTransform):
def __init__( self, __a = " "):
'''simple docstring'''
_lowerCAmelCase : int = sentence_delimiter
def snake_case__ ( self, __a):
'''simple docstring'''
return list(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for sent_idx, sentence in enumerate(__a):
chars.extend(self.process_string(__a))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__a) - 1:
chars.append(self.sentence_delimiter)
return chars
_snake_case = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
_snake_case = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_snake_case = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_snake_case = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_snake_case = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
], )
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__a, __a, truth_transform=__a, hypothesis_transform=__a, )["wer"]
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Optional[Any] = 0
for prediction, reference in zip(__a, __a):
_lowerCAmelCase : Dict = jiwer.compute_measures(
__a, __a, truth_transform=__a, hypothesis_transform=__a, )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
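# Worked example (a hedged sketch, not part of the metric itself): for the
# reference "abc" and the prediction "axc", the character-level alignment has
# 1 substitution, 0 deletions, 0 insertions and 2 hits, so
# CER = (S + D + I) / (S + D + C) = (1 + 0 + 0) / (1 + 0 + 2) = 1/3.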
| 300
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
_lowerCAmelCase,
_lowerCAmelCase,
_lowerCAmelCase,
_lowerCAmelCase,
_lowerCAmelCase,
_lowerCAmelCase,
_lowerCAmelCase,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = KandinskyVaaControlnetPipeline
lowerCamelCase__ = ['image_embeds', 'negative_image_embeds', 'hint']
lowerCamelCase__ = ['image_embeds', 'negative_image_embeds', 'hint']
lowerCamelCase__ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ = False
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case__ ( self):
'''simple docstring'''
return 100
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : int = {
"in_channels": 8,
# Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_lowerCAmelCase : Tuple = UNetaDConditionModel(**__a)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs)
return model
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : int = DDIMScheduler(
num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00_085, beta_end=0.012, clip_sample=__a, set_alpha_to_one=__a, steps_offset=1, prediction_type="epsilon", thresholding=__a, )
_lowerCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case__ ( self, __a, __a=0):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(__a)).to(__a)
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
__a)
# create hint
_lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(__a)).to(__a)
if str(__a).startswith("mps"):
_lowerCAmelCase : Optional[int] = torch.manual_seed(__a)
else:
_lowerCAmelCase : Dict = torch.Generator(device=__a).manual_seed(__a)
_lowerCAmelCase : Any = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "cpu"
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**__a)
_lowerCAmelCase : List[str] = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(__a))
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : Any = pipe(
**self.get_dummy_inputs(__a), return_dict=__a, )[0]
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(np.array(__a)).float() / 255.0
_lowerCAmelCase : List[Any] = hint.permute(2, 0, 1).unsqueeze(0)
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa)
pipe_prior.to(__a)
_lowerCAmelCase : str = KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.floataa)
_lowerCAmelCase : Any = pipeline.to(__a)
pipeline.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[int] = "A robot, 4k photo"
_lowerCAmelCase : List[str] = torch.Generator(device="cuda").manual_seed(0)
_lowerCAmelCase , _lowerCAmelCase : List[Any] = pipe_prior(
__a, generator=__a, num_inference_steps=5, negative_prompt="", ).to_tuple()
_lowerCAmelCase : str = torch.Generator(device="cuda").manual_seed(0)
_lowerCAmelCase : List[Any] = pipeline(
image_embeds=__a, negative_image_embeds=__a, hint=__a, generator=__a, num_inference_steps=100, output_type="np", )
_lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__a, __a)
| 300
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rag'
lowerCamelCase__ = True
def __init__( self, __a=None, __a=True, __a=None, __a=None, __a=None, __a=None, __a=None, __a=" / ", __a=" // ", __a=5, __a=300, __a=768, __a=8, __a="wiki_dpr", __a="train", __a="compressed", __a=None, __a=None, __a=False, __a=False, __a=0.0, __a=True, __a=False, __a=False, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(
bos_token_id=__a, pad_token_id=__a, eos_token_id=__a, decoder_start_token_id=__a, forced_eos_token_id=__a, is_encoder_decoder=__a, prefix=__a, vocab_size=__a, **__a, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_lowerCAmelCase : List[str] = kwargs.pop("question_encoder")
_lowerCAmelCase : Union[str, Any] = question_encoder_config.pop("model_type")
_lowerCAmelCase : int = kwargs.pop("generator")
_lowerCAmelCase : Optional[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : int = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : Tuple = AutoConfig.for_model(__a, **__a)
_lowerCAmelCase : List[Any] = reduce_loss
_lowerCAmelCase : Any = label_smoothing
_lowerCAmelCase : Optional[int] = exclude_bos_score
_lowerCAmelCase : Optional[Any] = do_marginalize
_lowerCAmelCase : Any = title_sep
_lowerCAmelCase : Any = doc_sep
_lowerCAmelCase : Optional[int] = n_docs
_lowerCAmelCase : Optional[Any] = max_combined_length
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : List[str] = dataset_split
_lowerCAmelCase : Optional[Any] = index_name
_lowerCAmelCase : Dict = retrieval_vector_size
_lowerCAmelCase : Union[str, Any] = retrieval_batch_size
_lowerCAmelCase : Optional[int] = passages_path
_lowerCAmelCase : Dict = index_path
_lowerCAmelCase : Tuple = use_dummy_dataset
_lowerCAmelCase : Union[str, Any] = output_retrieved
_lowerCAmelCase : str = do_deduplication
_lowerCAmelCase : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_lowerCAmelCase : Tuple = getattr(self.generator, "forced_eos_token_id", __a)
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Union[str, Any] = self.question_encoder.to_dict()
_lowerCAmelCase : Any = self.generator.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
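# Hedged composition sketch (upstream, the classmethod above is named
# `RagConfig.from_question_encoder_generator_configs`; the checkpoints below
# are illustrative, not taken from this file):
#   from transformers import AutoConfig, RagConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)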
| 300
| 1
|
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 0.00
_lowerCAmelCase : List[str] = 0
for resistor in resistors:
if resistor <= 0:
_lowerCAmelCase : Tuple = F"Resistor at index {index} has a negative or zero value!"
raise ValueError(_lowerCamelCase )
first_sum += 1 / float(_lowerCamelCase )
index += 1
return 1 / first_sum
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = 0.00
_lowerCAmelCase : str = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowerCAmelCase : Dict = F"Resistor at index {index} has a negative value!"
raise ValueError(_lowerCamelCase )
index += 1
return sum_r
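# Quick sanity check (a hedged example; note that both helpers above are named
# `A` in this dump, so the series-sum definition shadows the parallel one at
# import time): two 4-ohm resistors give 1 / (1/4 + 1/4) = 2.0 ohms in
# parallel and 4 + 4 = 8.0 ohms in series.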
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( a):
@staticmethod
@abstractmethod
def snake_case__ ( __a):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
'''simple docstring'''
raise NotImplementedError()
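# Hedged sketch of a concrete subcommand (names are illustrative; both abstract
# hooks above were collapsed to `snake_case__` by this dump, so upstream-style
# names are used instead to keep the contract readable):
#   class EchoCommand(BaseCommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("echo")
#           sub.set_defaults(factory=EchoCommand)
#       def run(self):
#           print("echo")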
| 300
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["politics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
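# In the upstream pipeline, `entailment_id` is resolved by scanning the model
# config's label2id mapping (rendered as `labelaid` in this dump) for a label
# whose lowercase form starts with "entail", falling back to -1 when no such
# label exists, which is exactly what the four cases above probe.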
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# for the base model alone, strip the leading "vit." prefix from every key that starts with "vit"
_lowerCAmelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : int = ""
else:
_lowerCAmelCase : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : int = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
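def _demo_qkv_split():
    # Hedged illustration (not part of the original conversion script): the
    # fused timm qkv weight stacks the query, key and value row blocks, each of
    # height `hidden_size`, which is exactly the layout the slicing above
    # relies on.
    hidden_size = 4
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : hidden_size * 2, :]
    v = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)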
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ViTConfig()
_lowerCAmelCase : str = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = int(vit_name[-12:-10] )
_lowerCAmelCase : str = int(vit_name[-9:-6] )
else:
_lowerCAmelCase : List[str] = 1_000
_lowerCAmelCase : int = "huggingface/label-files"
_lowerCAmelCase : Dict = "imagenet-1k-id2label.json"
_lowerCAmelCase : Dict = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[int] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : str = int(vit_name[-6:-4] )
_lowerCAmelCase : List[str] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCAmelCase : str = 192
_lowerCAmelCase : Union[str, Any] = 768
_lowerCAmelCase : str = 12
_lowerCAmelCase : Any = 3
elif vit_name[9:].startswith("small" ):
_lowerCAmelCase : Any = 384
_lowerCAmelCase : Any = 1_536
_lowerCAmelCase : List[str] = 12
_lowerCAmelCase : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCAmelCase : Optional[Any] = 768
_lowerCAmelCase : str = 2_304
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : List[str] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCAmelCase : Optional[Any] = 1_024
_lowerCAmelCase : List[str] = 4_096
_lowerCAmelCase : Dict = 24
_lowerCAmelCase : int = 16
elif vit_name[4:].startswith("huge" ):
_lowerCAmelCase : Union[str, Any] = 1_280
_lowerCAmelCase : Optional[int] = 5_120
_lowerCAmelCase : Optional[Any] = 32
_lowerCAmelCase : str = 16
# load original model from timm
_lowerCAmelCase : List[Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCAmelCase : Optional[int] = ViTModel(_lowerCamelCase ).eval()
else:
_lowerCAmelCase : Optional[int] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCAmelCase : Tuple = DeiTImageProcessor(size=config.image_size )
else:
_lowerCAmelCase : Dict = ViTImageProcessor(size=config.image_size )
_lowerCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase : Union[str, Any] = encoding["pixel_values"]
_lowerCAmelCase : List[str] = model(_lowerCamelCase )
if base_model:
_lowerCAmelCase : List[str] = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
_lowerCAmelCase : Any = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase_ ( a):
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = data
def __iter__( self):
'''simple docstring'''
for element in self.data:
yield element
def A ( _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : Any = Accelerator(even_batches=_lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
'''simple docstring'''
if iterable:
_lowerCAmelCase : List[str] = DummyIterableDataset(torch.as_tensor(range(_lowerCamelCase ) ) )
else:
_lowerCAmelCase : Union[str, Any] = TensorDataset(torch.as_tensor(range(_lowerCamelCase ) ) )
_lowerCAmelCase : List[str] = DataLoader(_lowerCamelCase , batch_size=_lowerCamelCase )
_lowerCAmelCase : Tuple = accelerator.prepare(_lowerCamelCase )
return dl
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : str = create_dataloader(accelerator=_lowerCamelCase , dataset_size=_lowerCamelCase , batch_size=_lowerCamelCase )
_lowerCAmelCase : str = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = create_accelerator(even_batches=_lowerCamelCase )
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : str = create_accelerator(even_batches=_lowerCamelCase )
_lowerCAmelCase : int = torch.nn.Linear(1 , 1 )
_lowerCAmelCase : Any = accelerator.prepare(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 )
_lowerCAmelCase : Tuple = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_lowerCamelCase ):
_lowerCAmelCase : str = ddp_model(batch[0].float() )
_lowerCAmelCase : List[Any] = output.sum()
loss.backward()
batch_idxs.append(_lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def A ( _lowerCamelCase ):
'''simple docstring'''
with warnings.catch_warnings(record=_lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , _lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : str = True
_lowerCAmelCase : int = False
_lowerCAmelCase : Any = create_accelerator(even_batches=_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.nn.Linear(1 , 1 )
_lowerCAmelCase : Union[str, Any] = accelerator.prepare(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 )
_lowerCAmelCase : Optional[Any] = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCamelCase ):
_lowerCAmelCase : Any = train_dl.batch_sampler.even_batches
_lowerCAmelCase : Union[str, Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = True
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Optional[int] = create_accelerator(even_batches=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = torch.nn.Linear(1 , 1 )
_lowerCAmelCase : str = accelerator.prepare(_lowerCamelCase )
create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=_lowerCamelCase )
_lowerCAmelCase : str = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCamelCase ):
_lowerCAmelCase : int = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = create_accelerator()
_lowerCAmelCase : str = torch.nn.Linear(1 , 1 )
_lowerCAmelCase : Union[str, Any] = accelerator.prepare(_lowerCamelCase )
create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=_lowerCamelCase )
with warnings.catch_warnings(record=_lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCamelCase ):
pass
assert issubclass(w[-1].category , _lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_lowerCAmelCase : List[str] = accelerator.state.distributed_type
_lowerCAmelCase : Optional[int] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_state
if __name__ == "__main__":
main()
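# Hedged launch note: the assertion in create_accelerator above expects exactly two processes,
# so one would typically run this with `accelerate launch --num_processes 2 <script>.py`,
# where the script path is a placeholder.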
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
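# Hedged migration sketch: callers can switch to the image processor imported above, e.g.
# `VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")`; the checkpoint name is
# illustrative only.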
| 300
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 300
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 1
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=30, __a=2, __a=3, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : str = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Any = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : List[str] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ViTMSNModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[int] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.type_sequence_label_size
_lowerCAmelCase : Optional[int] = ViTMSNForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : List[str] = model(__a, labels=__a)
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
print("Labels: {labels}")
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Tuple = ViTMSNForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCAmelCase : Optional[int] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ViTMSNModelTester(self)
_lowerCAmelCase : Optional[int] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(__a)
_lowerCAmelCase : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = ViTMSNModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(2)
_lowerCAmelCase : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(__a)
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**__a)
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : str = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3], __a, atol=1E-4))
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 1
|
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = set(range(3 , _lowerCamelCase , 2 ) )
primes.add(2 )
for p in range(3 , _lowerCamelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _lowerCamelCase , _lowerCamelCase ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowerCamelCase , limit + 1 , _lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
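# Worked check (illustrative): the sieve above multiplies phi[n] by (1 - 1/p) for every prime
# p dividing n, so e.g. phi(12) = 12 * (1 - 1/2) * (1 - 1/3) = 4.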
if __name__ == "__main__":
print(f'''{solution() = }''')
| 300
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["politics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 1
|
import os
import sys
import unittest
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_snake_case = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
_snake_case = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_test_to_tester_mapping(__a)
_lowerCAmelCase : Tuple = get_test_to_tester_mapping(__a)
_lowerCAmelCase : Union[str, Any] = {"BertModelTest": "BertModelTester"}
_lowerCAmelCase : Union[str, Any] = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(__a), __a)
self.assertEqual(get_test_info.to_json(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = get_model_to_test_mapping(__a)
_lowerCAmelCase : int = get_model_to_test_mapping(__a)
_lowerCAmelCase : str = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
_lowerCAmelCase : str = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(__a), __a)
self.assertEqual(get_test_info.to_json(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = get_model_to_tester_mapping(__a)
_lowerCAmelCase : Tuple = get_model_to_tester_mapping(__a)
_lowerCAmelCase : Tuple = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
_lowerCAmelCase : Any = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(__a), __a)
self.assertEqual(get_test_info.to_json(__a), __a)
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 1
@register_to_config
def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sigma_max
# setable values
_lowerCAmelCase : str = None
self.set_sigmas(__a, __a, __a, __a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a, __a)
_lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
_lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
_lowerCAmelCase : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
_lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
_lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
_lowerCAmelCase : List[str] = torch.zeros_like(__a)
_lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
_lowerCAmelCase : Dict = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCAmelCase : Optional[Any] = randn_tensor(
sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
_lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
_lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : List[Any] = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_lowerCAmelCase : int = step_size.unsqueeze(-1)
_lowerCAmelCase : List[Any] = sample + step_size * model_output
_lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a)
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
_lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
_lowerCAmelCase : Any = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a) * sigmas[:, None, None, None]
)
_lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
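# Hedged usage sketch for the scheduler above (loop shape is an assumption; the two stepping
# methods implement the predictor and the Langevin-style corrector from the score SDE paper):
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       sample = corrector_step(model_output, sample).prev_sample     # corrector pass
#       sample = predictor_step(model_output, t, sample).prev_sample  # predictor pass
# where corrector_step / predictor_step stand in for the obfuscated step methods defined above.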
| 300
| 1
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_snake_case = "base_with_context"
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
_lowerCAmelCase : str = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase : List[str] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Optional[Any] = ly_weight["attention"]
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase : Any = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : List[Any] = ly_weight["attention"]
_lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_lowerCAmelCase : List[Any] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
_lowerCAmelCase : Tuple = ly_weight["self_attention"]
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Dict = ly_weight["MultiHeadDotProductAttention_0"]
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_lowerCAmelCase : Tuple = jnp.tree_util.tree_map(onp.array , _lowerCamelCase )
_lowerCAmelCase : Dict = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
_lowerCAmelCase : List[Any] = os.path.join(args.checkpoint_path , ".." , "config.gin" )
_lowerCAmelCase : List[str] = inference.parse_training_gin_file(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = inference.InferenceModel(args.checkpoint_path , _lowerCamelCase )
_lowerCAmelCase : Dict = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
_lowerCAmelCase : Any = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
_lowerCAmelCase : List[Any] = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
_lowerCAmelCase : Optional[Any] = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
_lowerCAmelCase : Optional[int] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowerCamelCase )
_lowerCAmelCase : List[Any] = load_decoder(ta_checkpoint["target"]["decoder"] , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
_lowerCAmelCase : str = SpectrogramDiffusionPipeline(
notes_encoder=_lowerCamelCase , continuous_encoder=_lowerCamelCase , decoder=_lowerCamelCase , scheduler=_lowerCamelCase , melgan=_lowerCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_snake_case = parser.parse_args()
main(args)
| 300
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
i -= len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = i // 3
_lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : str = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
_lowerCAmelCase : str = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''simple docstring'''
    # random_letters: draw the requested number of letters, mirroring the
    # generalised `random` helper above (filled in from the commented-out pseudocode)
    return "".join(secrets.choice(ascii_letters ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''simple docstring'''
    # random_number: draw the requested number of digits
    return "".join(secrets.choice(digits ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''simple docstring'''
    # random_characters: draw the requested number of punctuation characters
    return "".join(secrets.choice(punctuation ) for _ in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase = 8 ):
'''simple docstring'''
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCAmelCase : Tuple = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Tuple = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Optional[Any] = any(char in digits for char in password )
_lowerCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase, lowercase,
# numbers, and special characters
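# Quick illustrative checks of the validator above (inputs invented for this
# sketch, not taken from the original source):
# A("Hj@l9p#Xq2") -> True    (upper + lower + digit + special all present)
# A("password")   -> False   (lowercase only)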
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 300
| 1
|
from __future__ import annotations
_snake_case = tuple[int, int, int]
_snake_case = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_snake_case = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
_snake_case = "EGZWVONAHDCLFQMSIPJBYUKXTR"
_snake_case = "FOBHMDKEXQNRAULPGSJVTYICZW"
_snake_case = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
_snake_case = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
_snake_case = "RMDJXFUWGISLHVTCQNKYPBEZOA"
_snake_case = "SGLCPQWZHKXAREONTFBVIYJUDM"
_snake_case = "HVSICLTYKQUBXDWAJZOMFGPREN"
_snake_case = "RZWQHFMVDBKICJLNTUXAGYPSOE"
_snake_case = "LFKIJODBEGAMQPXVUHYSTCZRWN"
_snake_case = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (unique_rotsel := len(set(_lowerCamelCase ) )) < 3:
_lowerCAmelCase : Any = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(_lowerCamelCase )
# Checks if rotor positions are valid
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = rotpos
if not 0 < rotorposa <= len(_lowerCamelCase ):
_lowerCAmelCase : List[str] = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(_lowerCamelCase )
if not 0 < rotorposa <= len(_lowerCamelCase ):
_lowerCAmelCase : str = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_lowerCamelCase )
if not 0 < rotorposa <= len(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_lowerCamelCase )
# Validates string and returns dict
_lowerCAmelCase : Any = _plugboard(_lowerCamelCase )
return rotpos, rotsel, pbdict
def A ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : int = F"Plugboard setting isn't type string ({type(_lowerCamelCase )})"
raise TypeError(_lowerCamelCase )
elif len(_lowerCamelCase ) % 2 != 0:
_lowerCAmelCase : Union[str, Any] = F"Odd number of symbols ({len(_lowerCamelCase )})"
raise Exception(_lowerCamelCase )
elif pbstring == "":
return {}
    _lowerCAmelCase : str = pbstring.replace(" " , "" )  # str.replace returns a new string, so the result must be kept
# Checks if all characters are unique
_lowerCAmelCase : List[Any] = set()
for i in pbstring:
if i not in abc:
_lowerCAmelCase : str = F"'{i}' not in list of symbols"
raise Exception(_lowerCamelCase )
elif i in tmppbl:
_lowerCAmelCase : Tuple = F"Duplicate symbol ({i})"
raise Exception(_lowerCamelCase )
else:
tmppbl.add(_lowerCamelCase )
del tmppbl
    # Create the dictionary
_lowerCAmelCase : List[str] = {}
for j in range(0 , len(_lowerCamelCase ) - 1 , 2 ):
_lowerCAmelCase : Optional[Any] = pbstring[j + 1]
_lowerCAmelCase : Any = pbstring[j]
return pb
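# Illustrative plugboard mapping (hypothetical input; note the string must be
# even-length *before* spaces are removed, given the check order above):
# _plugboard("ABCD") -> {"A": "B", "B": "A", "C": "D", "D": "C"}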
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = (rotora, rotora, rotora) , _lowerCamelCase = "" , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = text.upper()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = _validator(
_lowerCamelCase , _lowerCamelCase , plugb.upper() )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = rotor_position
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowerCAmelCase : str = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowerCAmelCase : Optional[Any] = plugboard[symbol]
# rotor ra --------------------------
_lowerCAmelCase : Optional[Any] = abc.index(_lowerCamelCase ) + rotorposa
_lowerCAmelCase : Tuple = rotora[index % len(_lowerCamelCase )]
# rotor rb --------------------------
_lowerCAmelCase : int = abc.index(_lowerCamelCase ) + rotorposa
_lowerCAmelCase : str = rotora[index % len(_lowerCamelCase )]
# rotor rc --------------------------
_lowerCAmelCase : List[Any] = abc.index(_lowerCamelCase ) + rotorposa
_lowerCAmelCase : Tuple = rotora[index % len(_lowerCamelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowerCAmelCase : Dict = reflector[symbol]
# 2nd rotors
_lowerCAmelCase : Union[str, Any] = abc[rotora.index(_lowerCamelCase ) - rotorposa]
_lowerCAmelCase : str = abc[rotora.index(_lowerCamelCase ) - rotorposa]
_lowerCAmelCase : str = abc[rotora.index(_lowerCamelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowerCAmelCase : Union[str, Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = 0
rotorposa += 1
if rotorposa >= len(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = 0
rotorposa += 1
if rotorposa >= len(_lowerCamelCase ):
_lowerCAmelCase : int = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = "This is my Python script that emulates the Enigma machine from WWII."
_snake_case = (1, 1, 1)
_snake_case = "pictures"
_snake_case = (rotora, rotora, rotora)
_snake_case = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = field(default=a , metadata={'help': 'Whether to use SortishSampler or not.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = super().to_dict()
for k, v in d.items():
if isinstance(__a, __a):
_lowerCAmelCase : str = v.to_dict()
return d
| 300
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCAmelCase : Optional[int] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if "." in tensor_name:
_lowerCAmelCase : Optional[Any] = tensor_name.split("." )
for split in splits[:-1]:
_lowerCAmelCase : int = getattr(_lowerCamelCase , _lowerCamelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_lowerCAmelCase : Any = new_module
_lowerCAmelCase : Any = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
_lowerCAmelCase : str = tensor_name in module._buffers
_lowerCAmelCase : List[str] = getattr(_lowerCamelCase , _lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Union[str, Any] = False
if is_buffer or not is_bitsandbytes_available():
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : Any = False
else:
_lowerCAmelCase : Tuple = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
_lowerCAmelCase : Any = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
_lowerCAmelCase : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_lowerCAmelCase : Any = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
_lowerCAmelCase : Any = value.to("cpu" )
if value.dtype == torch.inta:
_lowerCAmelCase : Optional[Any] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
_lowerCAmelCase : List[str] = torch.tensor(_lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowerCamelCase ) and fpaa_statistics is None:
_lowerCAmelCase : Any = new_value.T
_lowerCAmelCase : List[str] = old_value.__dict__
if is_abit:
_lowerCAmelCase : Dict = bnb.nn.IntaParams(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
elif is_abit:
_lowerCAmelCase : Dict = bnb.nn.Paramsabit(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_lowerCamelCase ) )
else:
if value is None:
_lowerCAmelCase : List[Any] = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
_lowerCAmelCase : List[str] = value.to(_lowerCamelCase )
else:
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase , device=_lowerCamelCase )
if is_buffer:
_lowerCAmelCase : Any = new_value
else:
_lowerCAmelCase : Any = nn.Parameter(_lowerCamelCase , requires_grad=old_value.requires_grad )
_lowerCAmelCase : Dict = new_value
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
_lowerCAmelCase : Tuple = []
current_key_name.append(_lowerCamelCase )
if (isinstance(_lowerCamelCase , nn.Linear ) or isinstance(_lowerCamelCase , _lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase , _lowerCAmelCase : int = module.weight.shape
else:
_lowerCAmelCase : Dict = module.in_features
_lowerCAmelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_lowerCAmelCase : Dict = bnb.nn.LinearabitLt(
_lowerCamelCase , _lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
_lowerCAmelCase : int = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_lowerCAmelCase : str = bnb.nn.Linearabit(
_lowerCamelCase , _lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
_lowerCAmelCase : Union[str, Any] = True
# Store the module class in case we need to transpose the weight later
_lowerCAmelCase : Any = type(_lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowerCamelCase )
if len(list(module.children() ) ) > 0:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_been_replaced=_lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
_lowerCAmelCase , _lowerCAmelCase : List[Any] = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def A ( *_lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowerCamelCase , )
return replace_with_bnb_linear(*_lowerCamelCase , **_lowerCamelCase )
def A ( *_lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowerCamelCase , )
return set_module_quantized_tensor_to_device(*_lowerCamelCase , **_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
    _lowerCAmelCase : Optional[int] = deepcopy(_lowerCamelCase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
_lowerCAmelCase : str = find_tied_parameters(_lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCAmelCase : Optional[int] = sum(_lowerCamelCase , [] )
_lowerCAmelCase : List[Any] = len(_lowerCamelCase ) > 0
# Check if it is a base model
_lowerCAmelCase : List[str] = not hasattr(_lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCAmelCase : Any = list(model.named_children() )
_lowerCAmelCase : str = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCAmelCase : Tuple = set(_lowerCamelCase ) - set(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
# remove ".weight" from the keys
_lowerCAmelCase : int = [".weight", ".bias"]
_lowerCAmelCase : List[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCAmelCase : Optional[Any] = name.replace(_lowerCamelCase , "" )
filtered_module_names.append(_lowerCamelCase )
return filtered_module_names
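# Sketch of the intended call order (names are the de-obfuscated upstream ones
# and are assumed, since every definition above is renamed to `A`):
# keys = get_keys_to_not_convert(model)      # the routine ending just above
# model = replace_with_bnb_linear(model, modules_to_not_convert=keys,
#                                 quantization_config=quantization_config)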
| 300
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output length indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300
| 1
|
from collections.abc import Callable
import numpy as np
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : List[Any] = np.zeros((n + 1,) )
_lowerCAmelCase : Dict = ya
_lowerCAmelCase : List[Any] = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : str = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
_lowerCAmelCase : List[Any] = y[k] + (
(step_size / 2) * (ode_func(_lowerCamelCase , y[k] ) + ode_func(x + step_size , _lowerCamelCase ))
)
x += step_size
return y
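# Usage sketch for the Heun (improved Euler) integrator above. The argument
# order cannot be recovered from the obfuscated signature; assuming
# (ode_func, y0, x0, x_end, step_size), solving y' = y with y(0) = 1:
# y = A(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)   # y[-1] ≈ e ≈ 2.71828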
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCAmelCase : Any = item.ha.text
_lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
_lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Tuple = model(__a)[0]
_lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Tuple = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
])
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
def to_tf_var_name(_lowerCamelCase ):
for patt, repl in iter(_lowerCamelCase ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 300
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a, scheduler=__a)
@torch.no_grad()
def __call__( self, __a = 1, __a = 100, __a = None, __a = None, __a = True, ):
'''simple docstring'''
if audio_length_in_s is None:
_lowerCAmelCase : Optional[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
_lowerCAmelCase : List[Any] = audio_length_in_s * self.unet.config.sample_rate
_lowerCAmelCase : Dict = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}.")
_lowerCAmelCase : Optional[int] = int(__a)
if sample_size % down_scale_factor != 0:
_lowerCAmelCase : Optional[int] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
" process.")
_lowerCAmelCase : Union[str, Any] = int(__a)
_lowerCAmelCase : Dict = next(iter(self.unet.parameters())).dtype
_lowerCAmelCase : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__a, __a) and len(__a) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(__a)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
_lowerCAmelCase : List[str] = randn_tensor(__a, generator=__a, device=self.device, dtype=__a)
# set step values
self.scheduler.set_timesteps(__a, device=audio.device)
_lowerCAmelCase : int = self.scheduler.timesteps.to(__a)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
_lowerCAmelCase : Union[str, Any] = self.unet(__a, __a).sample
            # 2. compute previous sample: x_t -> x_t-1
_lowerCAmelCase : str = self.scheduler.step(__a, __a, __a).prev_sample
_lowerCAmelCase : Tuple = audio.clamp(-1, 1).float().cpu().numpy()
_lowerCAmelCase : Union[str, Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__a)
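# Usage sketch (checkpoint id is illustrative, not from the original source;
# the class above is an unconditional audio diffusion pipeline in the
# DanceDiffusion style):
# pipe = UpperCAmelCase_.from_pretrained("harmonai/maestro-150k")
# audio = pipe(audio_length_in_s=4.0).audios[0]   # numpy array, (channels, samples)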
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
            for edge in edges:
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
                # the component of `head` and the component of `tail`; the
                # collapsed obfuscated names are split into `seta`/`setb`
                _lowerCAmelCase : Dict = union_find.find(head)
                _lowerCAmelCase : Optional[Any] = union_find.find(tail)
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _lowerCAmelCase : Union[str, Any] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        _lowerCAmelCase : Tuple = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
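# Added illustration: a de-obfuscated sketch of the Boruvka contraction loop
# implemented by the static method above. All names here (boruvka_mst_sketch,
# the list-based union-find helpers) are illustrative assumptions, not part of
# the original API.
def boruvka_mst_sketch(num_vertices, edges):
    """Return the MST edges of a connected undirected graph given as
    (u, v, weight) triples with pairwise-distinct weights."""
    parent = list(range(num_vertices))
    rank = [0] * num_vertices

    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    def union(a, b):
        ra, rb = find(a), find(b)
        if ra == rb:
            return False
        if rank[ra] < rank[rb]:
            ra, rb = rb, ra
        parent[rb] = ra
        if rank[ra] == rank[rb]:
            rank[ra] += 1
        return True

    mst, components = [], num_vertices
    while components > 1:
        # for every current component, remember its cheapest outgoing edge
        cheapest = {}
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                if ru not in cheapest or cheapest[ru][2] > w:
                    cheapest[ru] = (u, v, w)
                if rv not in cheapest or cheapest[rv][2] > w:
                    cheapest[rv] = (u, v, w)
        # contract along the selected edges
        for u, v, w in cheapest.values():
            if union(u, v):
                mst.append((u, v, w))
                components -= 1
    return mst

# e.g. boruvka_mst_sketch(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)])
# -> [(0, 1, 1), (1, 2, 2), (2, 3, 3)]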
| 300
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
def __init__( self, __a, __a=2, __a=True, __a=False, __a=10, __a=3, __a=32 * 8, __a=32 * 8, __a=4, __a=64, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : str = use_auxiliary_loss
_lowerCAmelCase : Optional[Any] = num_queries
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : Optional[Any] = min_size
_lowerCAmelCase : int = max_size
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : int = hidden_dim
_lowerCAmelCase : Dict = hidden_dim
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
__a)
_lowerCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size], device=__a)
_lowerCAmelCase : Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=__a) > 0.5
).float()
_lowerCAmelCase : Dict = (torch.rand((self.batch_size, self.num_labels), device=__a) > 0.5).long()
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
_lowerCAmelCase : List[Any] = self.num_queries
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : int = [1, 1, 1, 1]
_lowerCAmelCase : Any = self.num_channels
_lowerCAmelCase : List[Any] = 64
_lowerCAmelCase : Optional[Any] = 128
_lowerCAmelCase : Union[str, Any] = self.hidden_dim
_lowerCAmelCase : int = self.hidden_dim
_lowerCAmelCase : List[str] = self.hidden_dim
return config
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = output.encoder_hidden_states
_lowerCAmelCase : Dict = output.pixel_decoder_hidden_states
_lowerCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__a), len(config.backbone_config.depths))
self.parent.assertTrue(len(__a), len(config.backbone_config.depths))
self.parent.assertTrue(len(__a), config.decoder_layers)
def snake_case__ ( self, __a, __a, __a, __a=False):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = MaskaFormerModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(pixel_values=__a, pixel_mask=__a)
_lowerCAmelCase : Optional[Any] = model(__a, output_hidden_states=__a)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(__a, __a)
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MaskaFormerForUniversalSegmentation(config=__a)
model.to(__a)
model.eval()
def comm_check_on_output(__a):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(pixel_values=__a, pixel_mask=__a)
_lowerCAmelCase : List[str] = model(__a)
comm_check_on_output(__a)
_lowerCAmelCase : Union[str, Any] = model(
pixel_values=__a, pixel_mask=__a, mask_labels=__a, class_labels=__a)
comm_check_on_output(__a)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase__ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = MaskaFormerModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, has_text_modality=__a)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__a, **__a, output_hidden_states=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__a)
@unittest.skip(reason="Mask2Former does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__a)
_lowerCAmelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[str] = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowerCAmelCase : Tuple = MaskaFormerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCAmelCase : Dict = {
"pixel_values": torch.randn((2, 3, *size), device=__a),
"mask_labels": torch.randn((2, 10, *size), device=__a),
"class_labels": torch.zeros(2, 10, device=__a).long(),
}
_lowerCAmelCase : Any = self.model_tester.get_config()
_lowerCAmelCase : Optional[int] = MaskaFormerForUniversalSegmentation(__a).to(__a)
_lowerCAmelCase : Optional[int] = model(**__a)
self.assertTrue(outputs.loss is not None)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__a, **__a, output_hidden_states=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__a).to(__a)
_lowerCAmelCase : Any = model(**__a, output_attentions=__a)
self.assertTrue(outputs.attentions is not None)
def snake_case__ ( self):
'''simple docstring'''
if not self.model_tester.is_training:
return
_lowerCAmelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : List[str] = model_class(__a)
model.to(__a)
model.train()
_lowerCAmelCase : Any = model(__a, mask_labels=__a, class_labels=__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : int = model_class(__a).to(__a)
model.train()
_lowerCAmelCase : Optional[int] = model(__a, mask_labels=__a, class_labels=__a)
_lowerCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowerCAmelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCAmelCase : Dict = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__a)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
_snake_case = 1e-4
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(__a)
_lowerCAmelCase : Optional[int] = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="pt").to(__a)
_lowerCAmelCase : Optional[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__a, (1, 3, 384, 384))
with torch.no_grad():
_lowerCAmelCase : int = model(**__a)
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]]).to(__a)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], __a, atol=__a))
_lowerCAmelCase : Any = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]]).to(__a)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], __a, atol=__a))
_lowerCAmelCase : str = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]]).to(__a)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], __a, atol=__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(__a).eval()
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : Any = image_processor(__a, return_tensors="pt").to(__a)
_lowerCAmelCase : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__a, (1, 3, 384, 384))
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**__a)
# masks_queries_logits
_lowerCAmelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
_lowerCAmelCase : Dict = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
_lowerCAmelCase : List[Any] = torch.tensor(__a).to(__a)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __a, atol=__a))
# class_queries_logits
_lowerCAmelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
_lowerCAmelCase : Optional[Any] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
]).to(__a)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __a, atol=__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(__a).eval()
_lowerCAmelCase : Any = self.default_image_processor
_lowerCAmelCase : Dict = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)], return_tensors="pt", )
_lowerCAmelCase : Dict = inputs["pixel_values"].to(__a)
_lowerCAmelCase : List[Any] = [el.to(__a) for el in inputs["mask_labels"]]
_lowerCAmelCase : str = [el.to(__a) for el in inputs["class_labels"]]
with torch.no_grad():
_lowerCAmelCase : Dict = model(**__a)
self.assertTrue(outputs.loss is not None)
| 300
|
_snake_case = 8.3144598
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
    _snake_case = 0.028  # molar mass of N2 in kg/mol, as the function expects
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
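    # Added note: the function evaluates the Maxwell-Boltzmann root-mean-square
    # speed, v_rms = sqrt(3 * R * T / M), with R in J/(mol*K), T in kelvin and
    # M in kg/mol; for N2 (M = 0.028 kg/mol) at 300 K this is roughly 517 m/s.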
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'wav2vec2'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a)
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_norm
_lowerCAmelCase : Dict = feat_extract_activation
_lowerCAmelCase : Any = list(__a)
_lowerCAmelCase : List[str] = list(__a)
_lowerCAmelCase : List[Any] = list(__a)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Any = len(self.conv_dim)
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : Optional[int] = final_dropout
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = do_stable_layer_norm
_lowerCAmelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Optional[int] = apply_spec_augment
_lowerCAmelCase : int = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : List[Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : List[str] = num_codevector_groups
_lowerCAmelCase : List[Any] = contrastive_logits_temperature
_lowerCAmelCase : int = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Dict = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# adapter
_lowerCAmelCase : Optional[Any] = add_adapter
_lowerCAmelCase : Tuple = adapter_kernel_size
_lowerCAmelCase : str = adapter_stride
_lowerCAmelCase : List[Any] = num_adapter_layers
_lowerCAmelCase : str = output_hidden_size or hidden_size
_lowerCAmelCase : List[str] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
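# Added note: the property above multiplies the convolutional strides, i.e.
# the number of raw audio samples that map to one output frame of the feature
# encoder; with the default strides (5, 2, 2, 2, 2, 2, 2) this is
# 5 * 2**6 = 320 samples per frame.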
| 300
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
_snake_case = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
lowerCamelCase__ = GPTaTokenizer
def __init__( self, __a=None, __a=None, __a=None, __a="<|endoftext|>", __a="<|endoftext|>", __a="<|endoftext|>", __a=False, **__a, ):
'''simple docstring'''
super().__init__(
__a, __a, tokenizer_file=__a, unk_token=__a, bos_token=__a, eos_token=__a, add_prefix_space=__a, **__a, )
_lowerCAmelCase : Tuple = kwargs.pop("add_bos_token", __a)
_lowerCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space", __a) != add_prefix_space:
_lowerCAmelCase : Optional[Any] = getattr(__a, pre_tok_state.pop("type"))
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**__a)
_lowerCAmelCase : List[Any] = add_prefix_space
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words", __a)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__a, **__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = kwargs.get("is_split_into_words", __a)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__a, **__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self._tokenizer.model.save(__a, name=__a)
return tuple(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a, add_special_tokens=__a) + [self.eos_token_id])
if len(__a) > self.model_max_length:
_lowerCAmelCase : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
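# Added note: the conversation helper above appends `eos_token_id` after every
# turn and, when the accumulated history exceeds `model_max_length`, keeps
# only the most recent tokens via `input_ids[-self.model_max_length :]`.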
| 300
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
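# Added usage sketch (hypothetical names): in the un-obfuscated source this
# selector is driven roughly as
#     menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
#     index = menu.run(default_choice=0)
# where `run` corresponds to the last method above and returns the index of
# the selected entry.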
| 300
| 1
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_snake_case = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_snake_case = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_snake_case = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}), codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"], reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
], )
def snake_case__ ( self, __a, __a, __a=None, __a=True, __a=False):
'''simple docstring'''
if rouge_types is None:
_lowerCAmelCase : Dict = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
_lowerCAmelCase : List[Any] = rouge_scorer.RougeScorer(rouge_types=__a, use_stemmer=__a)
if use_aggregator:
_lowerCAmelCase : List[Any] = scoring.BootstrapAggregator()
else:
_lowerCAmelCase : int = []
for ref, pred in zip(__a, __a):
_lowerCAmelCase : int = scorer.score(__a, __a)
if use_aggregator:
aggregator.add_scores(__a)
else:
scores.append(__a)
if use_aggregator:
_lowerCAmelCase : List[Any] = aggregator.aggregate()
else:
_lowerCAmelCase : Union[str, Any] = {}
for key in scores[0]:
_lowerCAmelCase : List[str] = [score[key] for score in scores]
return result
| 300
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 1
|
_snake_case = {}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_lowerCAmelCase : List[str] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_lowerCAmelCase : Any = _calculate(days - 1 , _lowerCamelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_lowerCAmelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_lowerCAmelCase : Tuple = _calculate(days - 1 , _lowerCamelCase , 0 )
_lowerCAmelCase : int = state_late + state_absent + state_ontime
_lowerCAmelCase : List[str] = prizestrings
return prizestrings
def A ( _lowerCamelCase = 30 ):
'''simple docstring'''
return _calculate(_lowerCamelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
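    # Added sanity note: Project Euler 191 states there are exactly 43 prize
    # strings over a 4-day period, which the memoised recursion above
    # reproduces; assuming the solver is exposed as `solution(days)`:
    #     assert solution(4) == 43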
| 300
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a = 0):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = row, column
_lowerCAmelCase : str = [[default_value for c in range(__a)] for r in range(__a)]
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
_lowerCAmelCase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowerCAmelCase : List[str] = max(__a, len(str(__a)))
_lowerCAmelCase : Union[str, Any] = f"%{max_element_length}s"
# Make string and return
def single_line(__a) -> str:
nonlocal string_format_identifier
_lowerCAmelCase : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(__a) for row_vector in self.array)
return s
def __repr__( self):
'''simple docstring'''
return str(self)
def snake_case__ ( self, __a):
'''simple docstring'''
if not (isinstance(__a, (list, tuple)) and len(__a) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
return self.array[loc[0]][loc[1]]
def __setitem__( self, __a, __a):
'''simple docstring'''
assert self.validate_indicies(__a)
_lowerCAmelCase : Union[str, Any] = value
def __add__( self, __a):
'''simple docstring'''
assert isinstance(__a, __a)
assert self.row == another.row and self.column == another.column
# Add
_lowerCAmelCase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c] + another[r, c]
return result
def __neg__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : str = -self[r, c]
return result
def __sub__( self, __a):
'''simple docstring'''
return self + (-another)
def __mul__( self, __a):
'''simple docstring'''
if isinstance(__a, (int, float)): # Scalar multiplication
_lowerCAmelCase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Optional[Any] = self[r, c] * another
return result
elif isinstance(__a, __a): # Matrix multiplication
assert self.column == another.row
_lowerCAmelCase : List[str] = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowerCAmelCase : Optional[Any] = f"Unsupported type given for another ({type(__a)})"
raise TypeError(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowerCAmelCase : Any = self[r, c]
return result
def snake_case__ ( self, __a, __a):
'''simple docstring'''
assert isinstance(__a, __a) and isinstance(__a, __a)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowerCAmelCase : int = v.transpose()
_lowerCAmelCase : str = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
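# Added note: the method above treats the current matrix as A^-1 and applies
# the Sherman-Morrison identity
#     (A + u v^T)^-1 = A^-1 - (A^-1 u)(v^T A^-1) / (1 + v^T A^-1 u),
# returning None when the denominator 1 + v^T A^-1 u vanishes, i.e. when the
# rank-one update makes the matrix singular.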
# Testing
if __name__ == "__main__":
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowerCAmelCase : Union[str, Any] = 1
print(F"a^(-1) is {ainv}" )
# u, v
_lowerCAmelCase : Any = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 1, 2, -3
_lowerCAmelCase : List[Any] = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}" )
def A ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 300
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {"tokenizer_file": "tokenizer.json"}
_snake_case = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['input_ids', 'attention_mask']
lowerCamelCase__ = None
def __init__( self, __a=None, __a=None, __a=None, __a="<unk>", __a="<s>", __a="</s>", __a="<pad>", __a=False, __a=False, **__a, ):
'''simple docstring'''
super().__init__(
__a, __a, tokenizer_file=__a, unk_token=__a, bos_token=__a, eos_token=__a, pad_token=__a, add_prefix_space=__a, clean_up_tokenization_spaces=__a, **__a, )
_lowerCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space", __a) != add_prefix_space:
_lowerCAmelCase : List[str] = getattr(__a, pre_tok_state.pop("type"))
_lowerCAmelCase : List[str] = add_prefix_space
_lowerCAmelCase : Optional[int] = pre_tok_class(**__a)
_lowerCAmelCase : Optional[int] = add_prefix_space
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = kwargs.get("is_split_into_words", __a)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs.")
return super()._batch_encode_plus(*__a, **__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : str = kwargs.get("is_split_into_words", __a)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs.")
return super()._encode_plus(*__a, **__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self._tokenizer.model.save(__a, name=__a)
return tuple(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a, add_special_tokens=__a) + [self.eos_token_id])
if len(__a) > self.model_max_length:
_lowerCAmelCase : Dict = input_ids[-self.model_max_length :]
return input_ids
| 300
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
lowerCamelCase__ = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
lowerCamelCase__ = PandasConfig
def snake_case__ ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def snake_case__ ( self, __a):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
_lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__a, (str, list, tuple)):
_lowerCAmelCase : str = data_files
if isinstance(__a, __a):
_lowerCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
_lowerCAmelCase : str = []
for split_name, files in data_files.items():
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files]
splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files}))
return splits
def snake_case__ ( self, __a):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema)
return pa_table
def snake_case__ ( self, __a):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(__a)):
with open(__a, "rb") as f:
_lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a))
yield i, self._cast_table(__a)
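# Added note: `_generate_tables` above expects every data file to be a pickled
# pandas.DataFrame (loaded with `pd.read_pickle`) and converts each one to an
# Arrow table via `pa.Table.from_pandas` before the optional feature cast.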
| 300
| 1
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
_snake_case = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_snake_case = sorted(arg_to_scheduler.keys())
_snake_case = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class UpperCAmelCase_ ( pl.LightningModule):
def __init__( self, __a, __a=None, __a="base", __a=None, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__a)
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Tuple = Path(self.hparams.output_dir)
_lowerCAmelCase : Tuple = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=__a, **__a, )
else:
_lowerCAmelCase : PretrainedConfig = config
_lowerCAmelCase : Any = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams, __a, __a):
assert hasattr(self.config, __a), f"model config doesn't have a `{p}` attribute"
setattr(self.config, __a, getattr(self.hparams, __a))
if tokenizer is None:
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=__a, )
else:
_lowerCAmelCase : PreTrainedTokenizer = tokenizer
_lowerCAmelCase : Any = MODEL_MODES[mode]
if model is None:
_lowerCAmelCase : List[Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=__a, )
else:
_lowerCAmelCase : Optional[int] = model
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_type.from_pretrained(*__a, **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = arg_to_scheduler[self.hparams.lr_scheduler]
_lowerCAmelCase : Tuple = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
_lowerCAmelCase : Dict = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model
_lowerCAmelCase : Optional[int] = ["bias", "LayerNorm.weight"]
_lowerCAmelCase : Optional[Any] = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ], # check these named parameters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
_lowerCAmelCase : int = Adafactor(
__a, lr=self.hparams.learning_rate, scale_parameter=__a, relative_step=__a)
else:
_lowerCAmelCase : List[Any] = AdamW(
__a, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
_lowerCAmelCase : Optional[Any] = optimizer
_lowerCAmelCase : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return self.validation_step(__a, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.validation_end(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores
_lowerCAmelCase : List[str] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def snake_case__ ( self, __a):
'''simple docstring'''
if stage == "test":
_lowerCAmelCase : List[Any] = len(self.test_dataloader().dataset)
else:
_lowerCAmelCase : str = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=__a)
_lowerCAmelCase : Tuple = len(self.train_dataloader().dataset)
def snake_case__ ( self, __a, __a, __a = False):
'''simple docstring'''
raise NotImplementedError("You must implement this for your task")
def snake_case__ ( self):
'''simple docstring'''
return self.train_loader
def snake_case__ ( self):
'''simple docstring'''
return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=__a)
def snake_case__ ( self):
'''simple docstring'''
return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir, "cached_{}_{}_{}".format(
__a, list(filter(__a, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), )
@pl.utilities.rank_zero_only
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.output_dir.joinpath("best_tfmr")
_lowerCAmelCase : Tuple = self.step_count
self.model.save_pretrained(__a)
self.tokenizer.save_pretrained(__a)
@staticmethod
def snake_case__ ( __a, __a):
'''simple docstring'''
parser.add_argument(
"--model_name_or_path", default=__a, type=__a, required=__a, help="Path to pretrained model or model identifier from huggingface.co/models", )
parser.add_argument(
"--config_name", default="", type=__a, help="Pretrained config name or path if not the same as model_name")
parser.add_argument(
"--tokenizer_name", default=__a, type=__a, help="Pretrained tokenizer name or path if not the same as model_name", )
parser.add_argument(
"--cache_dir", default=str(Path(__a).parent / "test_run" / "cache"), type=__a, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
parser.add_argument(
"--encoder_layerdrop", type=__a, help="Encoder layer dropout probability (Optional). Goes into model.config", )
parser.add_argument(
"--decoder_layerdrop", type=__a, help="Decoder layer dropout probability (Optional). Goes into model.config", )
parser.add_argument(
"--dropout", type=__a, help="Dropout probability (Optional). Goes into model.config", )
parser.add_argument(
"--attention_dropout", type=__a, help="Attention dropout probability (Optional). Goes into model.config", )
parser.add_argument("--learning_rate", default=5E-5, type=__a, help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler", default="linear", choices=__a, metavar=__a, type=__a, help="Learning rate scheduler", )
parser.add_argument("--weight_decay", default=0.0, type=__a, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1E-8, type=__a, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=__a, help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers", default=4, type=__a, help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=__a)
parser.add_argument("--train_batch_size", default=32, type=__a)
parser.add_argument("--eval_batch_size", default=32, type=__a)
parser.add_argument("--adafactor", action="store_true")
class UpperCAmelCase_ ( pl.Callback):
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class UpperCAmelCase_ ( pl.Callback):
    def snake_case__ ( self, trainer, pl_module):
        '''simple docstring'''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class UpperCAmelCase_ ( pl.Callback):
    def snake_case__ ( self, trainer, pl_module):
        '''simple docstring'''
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)
    def snake_case__ ( self, trainer, pl_module):
        '''simple docstring'''
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
    def snake_case__ ( self, trainer, pl_module):
        '''simple docstring'''
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args ( parser , root_dir ):
    '''simple docstring'''
    parser.add_argument(
        "--output_dir" , default=str(Path(root_dir).parent / "test_run" / "model_checkpoints" ) , type=str , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=str , default="O2" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=int )
    parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=float , help="Max gradient norm" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
    parser.add_argument(
        "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--seed" , type=int , default=42 , help="random seed for initialization" )
    parser.add_argument(
        "--data_dir" , default=str(Path(root_dir).parent / "test_run" / "dummy-train-data" ) , type=str , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def generic_train ( model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs ):
    '''simple docstring'''
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print("RAG modeling tests with new set functions successfully executed!" )
    return trainer
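# For context, a minimal sketch of how these helpers are typically wired together.
# `MyTaskModule` is a hypothetical stand-in for a BaseTransformer-style subclass that
# implements get_dataloader for its task; only add_generic_args, add_model_specific_args
# and generic_train above are real. Left commented out since the task module is not
# defined in this file.
#
# import argparse
# import os
# from pathlib import Path
#
# def main():
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())                       # --output_dir, --fp16, --seed, ...
#     MyTaskModule.add_model_specific_args(parser, os.getcwd())   # --learning_rate, --train_batch_size, ...
#     args = parser.parse_args()
#     model = MyTaskModule(args)
#     trainer = generic_train(model, args)                        # builds callbacks + pl.Trainer, runs fit()
#     if args.do_predict:
#         checkpoints = sorted(Path(args.output_dir).glob("*.ckpt"))
#         model = model.load_from_checkpoint(checkpoints[-1])
#         trainer.test(model)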
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
    def snake_case__ ( self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def snake_case__ ( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
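# As a quick illustration of the API exercised above (a sketch; it assumes TensorFlow
# is installed and network access to the google/mobilebert-uncased weights):
if __name__ == "__main__":
    from transformers import TFMobileBertModel as _TFMobileBertModel

    demo_model = _TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
    demo_input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])  # toy token ids
    demo_outputs = demo_model(demo_input_ids)
    print(demo_outputs.last_hidden_state.shape)  # (1, 6, hidden_size)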
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_snake_case = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCAmelCase_ :
def __init__( self, __a, __a=16, __a=13, __a=7, __a=14, __a=10, __a=19, __a=5, __a=4, __a=True, __a=16, __a=2, __a=4, __a=4, __a="gelu", __a=0.1, __a=0.1, __a=[1, 2, 3, 4, 5], __a=25, __a=5, ):
'''simple docstring'''
_lowerCAmelCase : int = d_model
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Optional[int] = prediction_length
_lowerCAmelCase : Tuple = context_length
_lowerCAmelCase : List[Any] = cardinality
_lowerCAmelCase : Union[str, Any] = num_time_features
_lowerCAmelCase : Any = lags_sequence
_lowerCAmelCase : Optional[Any] = embedding_dimension
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = context_length
_lowerCAmelCase : Tuple = prediction_length + label_length
_lowerCAmelCase : List[Any] = label_length
_lowerCAmelCase : Optional[Any] = moving_average
_lowerCAmelCase : List[str] = autocorrelation_factor
def snake_case__ ( self):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
def snake_case__ ( self, __a):
'''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
_lowerCAmelCase : List[str] = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def snake_case__ ( self):
'''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def snake_case__ ( self, config, inputs_dict):
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict)
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_a = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
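# The standalone check above leans on Autoformer's series decomposition: each input window
# is split into a trend component (a moving average) and a seasonal remainder before being
# fed to the encoder/decoder. The sketch below is illustrative only -- a minimal stand-in
# for the model's decomposition_layer, reusing the torch import above.
def naive_series_decomposition(x, kernel_size=25):
    """Split a (batch, time, features) tensor into (seasonal, trend) via a moving average."""
    # replicate-pad both ends so the moving average is defined at the boundaries
    front = x[:, :1, :].repeat(1, (kernel_size - 1) // 2, 1)
    end = x[:, -1:, :].repeat(1, kernel_size // 2, 1)
    padded = torch.cat([front, x, end], dim=1)
    # avg_pool1d expects (batch, channels, time), hence the permutes
    trend = torch.nn.functional.avg_pool1d(
        padded.permute(0, 2, 1), kernel_size=kernel_size, stride=1
    ).permute(0, 2, 1)
    seasonal = x - trend
    return seasonal, trend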
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCamelCase__ = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCamelCase__ = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model , info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="Model has no tokens embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = inspect.signature(getattr(__a, "forward"))
# The main input is the name of the argument after `self`
_lowerCAmelCase : Union[str, Any] = list(model_signature.parameters.keys())[1]
self.assertEqual(AutoformerModel.main_input_name, __a)
def snake_case__ ( self):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask")
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
])
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def snake_case__ ( self):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : str = True
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Optional[Any] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(__a, __a))
_lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : str = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : Any = model(**self._prepare_for_class(__a, __a))
_lowerCAmelCase : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)
            correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
# decoder attentions
_lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertIsInstance(__a, (list, tuple))
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
# cross attentions
_lowerCAmelCase : List[Any] = outputs.cross_attentions
self.assertIsInstance(__a, (list, tuple))
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
# Check attention is always last and order is fine
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Optional[int] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(__a, __a))
self.assertEqual(out_len + 2, len(__a))
_lowerCAmelCase : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
@is_flaky()
def snake_case__ ( self):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def A ( _lowerCamelCase="train-batch.pt" ):
'''simple docstring'''
_lowerCAmelCase : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_lowerCamelCase , repo_type="dataset" )
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
return batch
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=_snake_case))
def snake_case__ ( self):
'''simple docstring'''
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=_snake_case))
def snake_case__ ( self):
'''simple docstring'''
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
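# Outside the test suite, the same generate() call is how forecasts would be produced in
# practice; averaging over the sample dimension gives a point estimate. A sketch reusing
# the prepare_batch helper above (requires network access for the weights and data):
def forecast_tourism_monthly():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            past_values=batch["past_values"],
            past_time_features=batch["past_time_features"],
            past_observed_mask=batch["past_observed_mask"],
            static_categorical_features=batch["static_categorical_features"],
            future_time_features=batch["future_time_features"],
        )
    return outputs.sequences.mean(dim=1)  # (batch_size, prediction_length) point forecast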
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rag'
lowerCamelCase__ = True
    def __init__( self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
@classmethod
    def snake_case__ ( cls, question_encoder_config, generator_config, **kwargs):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
def snake_case__ ( self):
'''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
return output
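# A short sketch of building such a composite config from its two sub-configs, using the
# upstream transformers API that this class mirrors (the checkpoint names are illustrative):
if __name__ == "__main__":
    from transformers import AutoConfig, RagConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="exact"
    )
    print(rag_config.generator.model_type)  # "bart"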