code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def A ( _A ):
"""simple docstring"""
if len(__lowerCamelCase ) == 0:
return array
snake_case_ , snake_case_ :List[Any] = min(__lowerCamelCase ), max(__lowerCamelCase )
# Compute the variables
snake_case_ :Any = _max - _min + 1
snake_case_ , snake_case_ :Any = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
snake_case_ :Optional[Any] = i - _min
snake_case_ :List[Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
snake_case_ :Tuple = 0
for i in range(__lowerCamelCase ):
while holes_repeat[i] > 0:
snake_case_ :Optional[Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : str = input('Enter numbers separated by comma:\n')
__UpperCAmelCase : Tuple = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 584 | from __future__ import annotations
def lowerCAmelCase( __lowerCamelCase ):
if len(__lowerCamelCase ) == 0:
return array
__a , __a = min(__lowerCamelCase ), max(__lowerCamelCase )
# Compute the variables
__a = _max - _min + 1
__a , __a = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
__a = i - _min
__a = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
__a = 0
for i in range(__lowerCamelCase ):
while holes_repeat[i] > 0:
__a = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : str = input("""Enter numbers separated by comma:\n""")
lowerCamelCase_ : Tuple = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 559 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__magic_name__ = logging.getLogger(__name__)
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__SCREAMING_SNAKE_CASE = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , A__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(A__ )
datasets.utils.logging.set_verbosity(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
UpperCAmelCase = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
UpperCAmelCase = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = train_dataset.features['''label'''].names
if training_args.do_eval:
UpperCAmelCase = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = eval_dataset.features['''label'''].names
if training_args.do_predict:
UpperCAmelCase = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = predict_dataset.features['''label'''].names
# Labels
UpperCAmelCase = len(A__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , idalabel={str(A__ ): label for i, label in enumerate(A__ )} , labelaid={label: i for i, label in enumerate(A__ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase = False
def preprocess_function(A__: Dict ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=A__ , max_length=data_args.max_seq_length , truncation=A__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(A__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(A__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(A__ ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(A__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(A__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
UpperCAmelCase = min(len(A__ ) , data_args.max_predict_samples )
UpperCAmelCase = predict_dataset.select(range(A__ ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
UpperCAmelCase = predict_dataset.map(
A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
UpperCAmelCase = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A__: EvalPrediction ):
UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , A__ ) else p.predictions
UpperCAmelCase = np.argmax(A__ , axis=1 )
return metric.compute(predictions=A__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase = default_data_collator
elif training_args.fpaa:
UpperCAmelCase = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 )
else:
UpperCAmelCase = None
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=A__ , args=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=A__ , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=A__ )
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
UpperCAmelCase = min(A__ , len(A__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , A__ )
trainer.save_metrics('''train''' , A__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate(eval_dataset=A__ )
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A__ )
UpperCAmelCase = min(A__ , len(A__ ) )
trainer.log_metrics('''eval''' , A__ )
trainer.save_metrics('''eval''' , A__ )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = trainer.predict(A__ , metric_key_prefix='''predict''' )
UpperCAmelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(A__ )
)
UpperCAmelCase = min(A__ , len(A__ ) )
trainer.log_metrics('''predict''' , A__ )
trainer.save_metrics('''predict''' , A__ )
UpperCAmelCase = np.argmax(A__ , axis=1 )
UpperCAmelCase = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(A__ , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(A__ ):
UpperCAmelCase = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 391 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : int = '''docs/source/en/_toctree.yml'''
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = defaultdict(_lowerCamelCase )
__snake_case = []
__snake_case = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(_lowerCamelCase )
__snake_case = new_doc_list
__snake_case = [key for key, value in counts.items() if value > 1]
__snake_case = []
for duplicate_key in duplicates:
__snake_case = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def _UpperCamelCase (_lowerCamelCase : Tuple=False )-> Any:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case = content[api_idx]['''sections''']
# Then to the model doc
__snake_case = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__snake_case = api_doc[scheduler_idx]['''sections''']
__snake_case = clean_doc_toc(_lowerCamelCase )
__snake_case = False
if new_scheduler_doc != scheduler_doc:
__snake_case = True
if overwrite:
__snake_case = new_scheduler_doc
if diff:
if overwrite:
__snake_case = api_doc
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def _UpperCamelCase (_lowerCamelCase : int=False )-> Optional[int]:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case = content[api_idx]['''sections''']
# Then to the model doc
__snake_case = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__snake_case = False
__snake_case = api_doc[pipeline_idx]['''sections''']
__snake_case = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__snake_case = pipeline_doc['''section''']
__snake_case = clean_doc_toc(_lowerCamelCase )
if overwrite:
__snake_case = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
__snake_case = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
__snake_case = True
if overwrite:
__snake_case = new_pipeline_docs
if diff:
if overwrite:
__snake_case = api_doc
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase_ : Dict = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 24 |
def UpperCamelCase ( snake_case__ : int ):
'''simple docstring'''
__snake_case :List[Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def UpperCamelCase ( snake_case__ : int = 100 ):
'''simple docstring'''
__snake_case :Tuple = 1
__snake_case :Dict = 2
for i in range(2 ,max_n + 1 ):
__snake_case :Optional[int] = pre_numerator
__snake_case :List[Any] = 2 * i // 3 if i % 3 == 0 else 1
__snake_case :Dict = cur_numerator
__snake_case :Optional[Any] = e_cont * pre_numerator + temp
return sum_digits(snake_case__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 455 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case__ = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 373 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 373 | 1 |
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = text, pattern
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = len(lowercase__ ), len(lowercase__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __magic_name__ (self ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE__ : int = self.mismatch_in_text(lowercase__ )
if mismatch_index == -1:
positions.append(lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE__ : Any = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
UpperCAmelCase__ : Optional[Any] = 'ABAABA'
UpperCAmelCase__ : Union[str, Any] = 'AB'
UpperCAmelCase__ : str = BoyerMooreSearch(text, pattern)
UpperCAmelCase__ : Optional[int] = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 223 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( _a ):
'''simple docstring'''
a__ = ["image_processor", "tokenizer"]
a__ = "FlavaImageProcessor"
a__ = ("BertTokenizer", "BertTokenizerFast")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ) -> Tuple:
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase__ , )
__UpperCAmelCase = kwargs.pop('''feature_extractor''' )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ , lowercase__ )
__UpperCAmelCase = self.image_processor
def __call__(self , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ) -> Tuple:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if images is not None:
__UpperCAmelCase = self.image_processor(
lowercase__ , return_image_mask=lowercase__ , return_codebook_pixels=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if text is not None and images is not None:
encoding.update(lowercase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Optional[Any]:
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Tuple:
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer.model_input_names
__UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase_ (self ) -> List[str]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase__ , )
return self.image_processor_class
@property
def lowerCAmelCase_ (self ) -> Dict:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase__ , )
return self.image_processor
| 303 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __lowerCAmelCase : List[Any] ) -> list:
_UpperCamelCase : List[str] = len(__lowerCAmelCase )
for _ in range(__lowerCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_UpperCamelCase : Optional[int] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = list(range(1_0, 0, -1))
print(f'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 719 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _SCREAMING_SNAKE_CASE ( enum.Enum ):
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase : List[str] = None
if self.model.config.prefix is not None:
_UpperCamelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase : Union[str, Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = self._sanitize_parameters(prefix=lowerCAmelCase__ , **self._forward_params )
_UpperCamelCase : str = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase : List[str] = {**self._forward_params, **forward_params}
    def lowercase_ (self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
        """Split caller kwargs into (preprocess_params, forward_params, postprocess_params).

        Handles ``prefix`` tokenisation, ``handle_long_generation`` validation,
        the mutually-exclusive ``return_full_text``/``return_text``/``return_tensors``
        flags, and ``stop_sequence`` encoding.

        NOTE(review): assignments bind ``_UpperCamelCase`` but later lines read
        ``preprocess_params``/``generate_kwargs``/``stop_sequence_ids`` — the local
        names appear mangled; confirm them before executing.
        """
        _UpperCamelCase : Union[str, Any] = {}
        if prefix is not None:
            _UpperCamelCase : Union[str, Any] = prefix
        if prefix:
            # Tokenise the prefix once so generation length can account for it.
            _UpperCamelCase : Optional[int] = self.tokenizer(
                lowerCAmelCase__ , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=self.framework )
            _UpperCamelCase : List[str] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']" )
            _UpperCamelCase : Tuple = handle_long_generation
        preprocess_params.update(lowerCAmelCase__ )
        _UpperCamelCase : List[str] = generate_kwargs
        _UpperCamelCase : Optional[Any] = {}
        # `return_full_text` / `return_text` / `return_tensors` are mutually exclusive.
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            _UpperCamelCase : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            _UpperCamelCase : List[str] = ReturnType.TENSORS
        if return_type is not None:
            _UpperCamelCase : Optional[int] = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase : Tuple = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Encode the stop sequence; only single-token stops are supported so far.
            _UpperCamelCase : Any = self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
            if len(lowerCAmelCase__ ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            _UpperCamelCase : List[str] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def lowercase_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
        """Tokenise inputs; Transfo-XL additionally wants a space before punctuation symbols.

        NOTE(review): the body reads ``kwargs`` but the parameters are mangled to
        ``lowerCAmelCase__`` — confirm the real parameter name.
        """
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*lowerCAmelCase__ , **lowerCAmelCase__ )
    def __call__(self , lowerCAmelCase__ , **lowerCAmelCase__ ):
        """Generate text continuation(s) for the given prompt(s); delegates to the base ``Pipeline.__call__``."""
        return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__=None , **lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=self.framework )
_UpperCamelCase : List[str] = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase : Union[str, Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase : Union[str, Any] = generate_kwargs["max_new_tokens"]
else:
_UpperCamelCase : str = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
_UpperCamelCase : Tuple = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase : Tuple = inputs["attention_mask"][:, -keep_length:]
return inputs
    def lowercase_ (self , lowerCAmelCase__ , **lowerCAmelCase__ ):
        """Run ``model.generate`` on the tokenised inputs and reshape the output to
        (batch, num_return_sequences, seq_len).

        NOTE(review): locals are bound to ``_UpperCamelCase`` while later lines read
        ``input_ids``/``attention_mask``/``in_b``/``out_b``/``prefix_length`` — the
        names look mangled; confirm before executing.
        """
        _UpperCamelCase : Optional[int] = model_inputs["input_ids"]
        _UpperCamelCase : List[str] = model_inputs.get("attention_mask" , lowerCAmelCase__ )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            _UpperCamelCase : List[Any] = None
            _UpperCamelCase : Union[str, Any] = None
            _UpperCamelCase : Dict = 1
        else:
            _UpperCamelCase : Any = input_ids.shape[0]
        _UpperCamelCase : Tuple = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        _UpperCamelCase : Tuple = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            _UpperCamelCase : Tuple = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                # max_length counts the prefix too, so extend it by the prefix length.
                _UpperCamelCase : List[str] = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            _UpperCamelCase : List[str] = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        _UpperCamelCase : Optional[Any] = self.model.generate(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ )
        _UpperCamelCase : List[str] = generated_sequence.shape[0]
        # Fold num_return_sequences into its own axis: (in_b, out_b // in_b, seq_len).
        if self.framework == "pt":
            _UpperCamelCase : str = generated_sequence.reshape(lowerCAmelCase__ , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            _UpperCamelCase : Union[str, Any] = tf.reshape(lowerCAmelCase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__=ReturnType.FULL_TEXT , lowerCAmelCase__=True ):
        """Decode the generated sequences into records of ``{"generated_token_ids"}`` or
        ``{"generated_text"}`` depending on ``return_type``; FULL_TEXT prepends the
        original prompt to the newly generated portion.

        NOTE(review): locals are bound to ``_UpperCamelCase`` while later lines read
        ``generated_sequence``/``records``/``text``/``prompt_length``/``all_text`` —
        the names look mangled; confirm before executing.
        """
        _UpperCamelCase : str = model_outputs["generated_sequence"][0]
        _UpperCamelCase : Tuple = model_outputs["input_ids"]
        _UpperCamelCase : Dict = model_outputs["prompt_text"]
        _UpperCamelCase : str = generated_sequence.numpy().tolist()
        _UpperCamelCase : Optional[int] = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase : List[Any] = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                _UpperCamelCase : Dict = self.tokenizer.decode(
                    lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    _UpperCamelCase : int = 0
                else:
                    # Length (in characters) of the decoded prompt, used to slice it off.
                    _UpperCamelCase : Optional[int] = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , ) )
                if return_type == ReturnType.FULL_TEXT:
                    _UpperCamelCase : int = prompt_text + text[prompt_length:]
                else:
                    _UpperCamelCase : Any = text[prompt_length:]
                _UpperCamelCase : Dict = {"generated_text": all_text}
            records.append(lowerCAmelCase__ )
        return records
| 239 | 0 |
from ...utils import is_torch_available, is_transformers_available
# The VQ-Diffusion pipeline needs both `transformers` and `torch`; expose it only
# when both optional dependencies are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 70 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import scaffold for the TAPAS model (standard transformers pattern).
# Fixes: the structure dict was bound to a mangled name (`lowerCamelCase_`) while
# `_LazyModule` below read `_import_structure` (NameError), and the torch/TF
# branches built lists that were never attached to the structure.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

# PyTorch modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

# TensorFlow modeling classes are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 548 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Backtracking depth-first search for the n-queens problem.

    ``possible_board[row]`` is the column of the queen placed in ``row``.
    Every complete, collision-free placement is rendered as a list of row
    strings and appended to ``boards`` (mutated in place).

    Fixes: the original signature repeated ``__lowerCamelCase`` for all five
    parameters (a SyntaxError) while the body and the recursive call read
    ``possible_board``/``boards``/``n`` and ``depth_first_search`` — the real
    names (also used by the in-file caller) are restored here.
    """
    row = len(possible_board)
    # A queen in every row: render the placement, e.g. [1, 3, 0, 2] ->
    # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # Try every column in the current row.
    for col in range(n):
        # Reject columns already used (vertical collision) and both diagonals:
        #   45 deg:  row - col is constant along a right diagonal
        #  135 deg:  row + col is constant along a left  diagonal
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Recurse with this queen placed and its collision lines recorded.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle for an ``n`` x ``n`` board and print every
    solution followed by the total count.

    Fixes: the original bound the accumulator to a mangled name (``lowercase_``)
    but passed the board-size int where the accumulator list was expected, and
    printed the wrong objects; the name matches the in-file ``__main__`` caller.
    """
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards, one row string per line, blank line between boards.
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 601 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: float, b: float, c: float) -> tuple[complex | float, complex | float]:
    """Return both roots of ``a*x**2 + b*x + c = 0``.

    Complex roots are returned as ``complex``; purely real roots are collapsed
    to ``float``. Raises ``ValueError`` when ``a`` is zero (not a quadratic).

    Fixes: the original signature repeated ``__lowerCamelCase`` for all three
    parameters (SyntaxError) and bound both roots to the same mangled name; the
    names ``a``/``b``/``c`` are what the in-file caller passes as keywords.
    The ``int`` annotations were also over-restrictive — any real coefficients work.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    # cmath.sqrt handles a negative discriminant by returning a complex value.
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    """Demonstrate ``quadratic_roots`` on 5*x**2 + 6*x + 1 = 0.

    Fixes: the original tuple-unpacked into a single mangled name twice and then
    printed an undefined ``solutiona``; the two solutions get distinct names here.
    """
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
| 601 | 1 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _UpperCamelCase ( UpperCamelCase = "AAPL" ) -> str:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
__UpperCAmelCase : str = BeautifulSoup(requests.get(UpperCamelCase ).text , "html.parser" )
__UpperCAmelCase : str = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 77 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the X-CLIP model (standard transformers pattern).
# Fixes: the structure dict was bound to a mangled name (`A`) while `_LazyModule`
# below read `_import_structure` (NameError), the torch branch built a list that
# was never attached to the structure, and the final line had dataset junk
# ("| 287 | 0 |") fused onto it, making the module unparsable.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

# PyTorch modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# NOTE(review): every module-level binding in this section is mangled to
# ``SCREAMING_SNAKE_CASE__`` while later code reads ``unet_conversion_map``,
# ``unet_conversion_map_resnet`` and ``unet_conversion_map_layer`` (and the
# loop-local prefix names). Restore the original identifiers before running.
# Each entry maps a HF-Diffusers UNet key prefix to its Stable Diffusion name.
SCREAMING_SNAKE_CASE__ = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

# ResNet-internal sub-key renames (applied only to keys containing "resnets").
SCREAMING_SNAKE_CASE__ = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

SCREAMING_SNAKE_CASE__ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        SCREAMING_SNAKE_CASE__ = f'down_blocks.{i}.resnets.{j}.'
        SCREAMING_SNAKE_CASE__ = f'input_blocks.{3*i + j + 1}.0.'
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            SCREAMING_SNAKE_CASE__ = f'down_blocks.{i}.attentions.{j}.'
            SCREAMING_SNAKE_CASE__ = f'input_blocks.{3*i + j + 1}.1.'
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        SCREAMING_SNAKE_CASE__ = f'up_blocks.{i}.resnets.{j}.'
        SCREAMING_SNAKE_CASE__ = f'output_blocks.{3*i + j}.0.'
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            SCREAMING_SNAKE_CASE__ = f'up_blocks.{i}.attentions.{j}.'
            SCREAMING_SNAKE_CASE__ = f'output_blocks.{3*i + j}.1.'
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        SCREAMING_SNAKE_CASE__ = f'down_blocks.{i}.downsamplers.0.conv.'
        SCREAMING_SNAKE_CASE__ = f'input_blocks.{3*(i+1)}.0.op.'
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        SCREAMING_SNAKE_CASE__ = f'up_blocks.{i}.upsamplers.0.'
        SCREAMING_SNAKE_CASE__ = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

# Mid-block: one attention sandwiched between two resnets.
SCREAMING_SNAKE_CASE__ = "mid_block.attentions.0."
SCREAMING_SNAKE_CASE__ = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    SCREAMING_SNAKE_CASE__ = f'mid_block.resnets.{j}.'
    SCREAMING_SNAKE_CASE__ = f'middle_block.{2*j}.'
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
    """Rename a HF-Diffusers UNet state dict into Stable Diffusion key names.

    Builds an identity key mapping, applies the direct-key, resnet-sub-key and
    layer-prefix replacement tables, then re-keys the tensors.

    NOTE(review): locals are mangled — assignments bind ``lowerCAmelCase`` while
    later lines read ``unet_state_dict``/``mapping``/``new_state_dict``, and the
    ``v.replace(...)`` arguments were both replaced by the parameter name.
    Restore the real names before executing.
    """
    lowerCAmelCase = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        lowerCAmelCase = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                lowerCAmelCase = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowerCAmelCase = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            lowerCAmelCase = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowerCAmelCase = v
    lowerCAmelCase = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# NOTE(review): same mangling as above — bindings use ``SCREAMING_SNAKE_CASE__``
# while the appends read ``vae_conversion_map`` and the per-loop prefix names.
# Maps HF-Diffusers VAE key prefixes to Stable Diffusion names.
SCREAMING_SNAKE_CASE__ = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        SCREAMING_SNAKE_CASE__ = f'encoder.down_blocks.{i}.resnets.{j}.'
        SCREAMING_SNAKE_CASE__ = f'encoder.down.{i}.block.{j}.'
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        SCREAMING_SNAKE_CASE__ = f'down_blocks.{i}.downsamplers.0.'
        SCREAMING_SNAKE_CASE__ = f'down.{i}.downsample.'
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        SCREAMING_SNAKE_CASE__ = f'up_blocks.{i}.upsamplers.0.'
        SCREAMING_SNAKE_CASE__ = f'up.{3-i}.upsample.'
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        SCREAMING_SNAKE_CASE__ = f'decoder.up_blocks.{i}.resnets.{j}.'
        SCREAMING_SNAKE_CASE__ = f'decoder.up.{3-i}.block.{j}.'
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    SCREAMING_SNAKE_CASE__ = f'mid_block.resnets.{i}.'
    SCREAMING_SNAKE_CASE__ = f'mid.block_{i+1}.'
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# Attention sub-key renames for the VAE mid-block attention layers.
SCREAMING_SNAKE_CASE__ = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Append two singleton dims so a linear attention weight ``[C_out, C_in]``
    matches Stable Diffusion's conv-style attention weight ``[C_out, C_in, 1, 1]``.

    Fixes: the parameter was mangled to ``SCREAMING_SNAKE_CASE`` while the body
    read ``w`` (NameError), and the name matches the in-file call site.
    """
    return w.reshape(*w.shape, 1, 1)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any ):
    """Rename a HF-Diffusers VAE state dict into Stable Diffusion key names and
    reshape the mid-block attention weights via ``reshape_weight_for_sd``.

    NOTE(review): locals are mangled — assignments bind ``lowerCAmelCase`` while
    later lines read ``vae_state_dict``/``mapping``/``new_state_dict`` and the
    ``v.replace(...)`` arguments were replaced by the parameter name. Restore
    the real names before executing.
    """
    lowerCAmelCase = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            lowerCAmelCase = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowerCAmelCase = v
    for k, v in mapping.items():
        # Attention layers additionally need their q/k/v/norm sub-keys renamed.
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                lowerCAmelCase = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowerCAmelCase = v
    lowerCAmelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
    lowerCAmelCase = ["""q""", """k""", """v""", """proj_out"""]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F'mid.attn_1.{weight_name}.weight' in k:
                # Linear -> conv weight layout for SD's mid-block attention.
                print(F'Reshaping {k} for SD format' )
                lowerCAmelCase = reshape_weight_for_sd(SCREAMING_SNAKE_CASE )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# NOTE(review): bindings mangled to ``SCREAMING_SNAKE_CASE__`` while later code
# reads ``textenc_conversion_lst``/``protected``/``textenc_pattern``.
# Maps HF CLIP text-encoder key fragments to the OpenCLIP (SD v2) layout.
SCREAMING_SNAKE_CASE__ = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
# Regex-escaped lookup of HF fragment -> SD fragment, and a single alternation
# pattern so all fragments can be substituted in one pass.
SCREAMING_SNAKE_CASE__ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE__ = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE__ = {"q": 0, "k": 1, "v": 2}
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any ):
    """Convert a HF CLIP text-encoder state dict to the SD v2 (OpenCLIP) layout,
    fusing the separate q/k/v projection weights and biases into single
    ``in_proj`` tensors and renaming all remaining keys via ``textenc_pattern``.

    NOTE(review): locals are mangled — assignments bind ``lowerCAmelCase`` while
    later lines read ``capture_qkv_weight``/``capture_qkv_bias``/``new_state_dict``/
    ``k_pre``/``k_code`` and the lambda reads ``m``. Restore before executing.
    """
    lowerCAmelCase = {}
    lowerCAmelCase = {}
    lowerCAmelCase = {}
    for k, v in text_enc_dict.items():
        # Collect q/k/v projection WEIGHTS per layer so they can be concatenated.
        if (
            k.endswith(""".self_attn.q_proj.weight""" )
            or k.endswith(""".self_attn.k_proj.weight""" )
            or k.endswith(""".self_attn.v_proj.weight""" )
        ):
            lowerCAmelCase = k[: -len(""".q_proj.weight""" )]
            lowerCAmelCase = k[-len("""q_proj.weight""" )]
            if k_pre not in capture_qkv_weight:
                lowerCAmelCase = [None, None, None]
            lowerCAmelCase = v
            continue

        # Collect q/k/v projection BIASES per layer likewise.
        if (
            k.endswith(""".self_attn.q_proj.bias""" )
            or k.endswith(""".self_attn.k_proj.bias""" )
            or k.endswith(""".self_attn.v_proj.bias""" )
        ):
            lowerCAmelCase = k[: -len(""".q_proj.bias""" )]
            lowerCAmelCase = k[-len("""q_proj.bias""" )]
            if k_pre not in capture_qkv_bias:
                lowerCAmelCase = [None, None, None]
            lowerCAmelCase = v
            continue

        # Non-qkv keys: rename fragments in one regex pass.
        lowerCAmelCase = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , SCREAMING_SNAKE_CASE )
        lowerCAmelCase = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        lowerCAmelCase = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.cat(SCREAMING_SNAKE_CASE )

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        lowerCAmelCase = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.cat(SCREAMING_SNAKE_CASE )

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """The SD v1 text encoder keeps HF's CLIP key layout, so no renaming is
    needed — return the state dict unchanged.

    Fixes: the parameter was mangled to ``SCREAMING_SNAKE_CASE`` while the body
    returned ``text_enc_dict`` (NameError), and the name matches the in-file
    call site in the ``__main__`` block.
    """
    return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE__ = load_file(unet_path, device="cpu")
else:
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
SCREAMING_SNAKE_CASE__ = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE__ = load_file(vae_path, device="cpu")
else:
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
SCREAMING_SNAKE_CASE__ = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE__ = load_file(text_enc_path, device="cpu")
else:
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
SCREAMING_SNAKE_CASE__ = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
SCREAMING_SNAKE_CASE__ = convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE__ = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE__ = convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE__ = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE__ = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE__ = {"transformer." + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE__ = convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE__ = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE__ = convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE__ = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE__ = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 393 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: ``max(0, x)``.

    Fixes: the function was named ``UpperCAmelCase__`` while the ``__main__``
    block calls ``relu``, and its parameter was mangled so the body referenced
    an unbound name. The return annotation is corrected — ``np.maximum``
    returns an ndarray, not a ``list[float]``.

    >>> relu([-1, 0, 5]).tolist()
    [0, 0, 5]
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
| 393 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE( A__ ):
    """Unconditional latent-diffusion image generation pipeline.

    Denoises random latents with a UNet under a DDIM scheduler, then decodes
    the final latents to images with a VQ-VAE. NOTE(review): the class/base
    names look machine-mangled (base ``A__`` is presumably ``DiffusionPipeline``
    — confirm against the original file).
    """

    def __init__( self : Dict , __snake_case : VQModel , __snake_case : UNetaDModel , __snake_case : DDIMScheduler ) -> List[str]:
        """Register the VQ-VAE, UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=__snake_case , unet=__snake_case , scheduler=__snake_case )

    @torch.no_grad()
    def __call__( self : Tuple , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : float = 0.0 , __snake_case : int = 50 , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , **__snake_case : Any , ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images by iterative denoising of random latents.

        Returns an ``ImagePipelineOutput`` (or a bare tuple when ``return_dict``
        is falsy); ``output_type == "pil"`` converts the arrays to PIL images.
        """
        # Sample initial latents shaped (batch, channels, sample, sample).
        UpperCAmelCase : Optional[int] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__snake_case , )
        UpperCAmelCase : List[Any] = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        UpperCAmelCase : int = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(__snake_case )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        UpperCAmelCase : Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        UpperCAmelCase : str = {}
        if accepts_eta:
            UpperCAmelCase : List[str] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            UpperCAmelCase : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case )
            # predict the noise residual
            UpperCAmelCase : Tuple = self.unet(__snake_case , __snake_case ).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase : List[Any] = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample

        # decode the image latents with the VAE
        UpperCAmelCase : Dict = self.vqvae.decode(__snake_case ).sample

        # Map from [-1, 1] to [0, 1] and move channels last for image output.
        UpperCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCAmelCase : List[Any] = self.numpy_to_pil(__snake_case )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__snake_case )
| 127 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] ) -> int:
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : List[Any] = BlipImageProcessor()
UpperCAmelCase : int = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCAmelCase : Dict = BlipaProcessor(__snake_case , __snake_case )
processor.save_pretrained(self.tmpdirname )
def A ( self : Optional[Any] , **__snake_case : Union[str, Any] ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).tokenizer
def A ( self : List[Any] , **__snake_case : List[Any] ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).image_processor
def A ( self : Optional[int] ) -> str:
shutil.rmtree(self.tmpdirname )
def A ( self : Any ) -> int:
UpperCAmelCase : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : int ) -> Tuple:
UpperCAmelCase : Optional[int] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase : Dict = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
UpperCAmelCase : Optional[int] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Any = self.get_image_processor()
UpperCAmelCase : List[str] = self.get_tokenizer()
UpperCAmelCase : List[Any] = BlipaProcessor(tokenizer=__snake_case , image_processor=__snake_case )
UpperCAmelCase : List[Any] = self.prepare_image_inputs()
UpperCAmelCase : Union[str, Any] = image_processor(__snake_case , return_tensors='''np''' )
UpperCAmelCase : List[str] = processor(images=__snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def test_tokenizer(self):
    """Processor(text=...) must match calling the tokenizer directly."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = "lower newer"
    encoded_processor = processor(text=input_str)
    # NOTE(review): the obfuscated source hid the ``return_token_type_ids``
    # value; ``False`` follows the standard processor-test template -- confirm.
    encoded_tok = tokenizer(input_str, return_token_type_ids=False)

    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
    """Calling the processor with text+images yields the expected keys; no input raises."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = "lower newer"
    image_input = self.prepare_image_inputs()
    inputs = processor(text=input_str, images=image_input)
    self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

    # Calling with neither text nor images must fail.
    # NOTE(review): the expected exception class was erased by obfuscation;
    # ValueError follows the standard processor contract -- confirm.
    with pytest.raises(ValueError):
        processor()
def test_tokenizer_decode(self):
    """processor.batch_decode must delegate to tokenizer.batch_decode."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    decoded_processor = processor.batch_decode(predicted_ids)
    decoded_tok = tokenizer.batch_decode(predicted_ids)

    self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
    """The processor's outputs must expose exactly the model input names."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = "lower newer"
    image_input = self.prepare_image_inputs()
    inputs = processor(text=input_str, images=image_input)

    # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
    self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 127 | 1 |
from ..utils import DummyObject, requires_backends
class _a(metaclass=DummyObject):
    """Placeholder raised in place of the real class when flax/transformers are missing.

    Fixes the obfuscated ``metaclass=__A`` (undefined) -> ``DummyObject``,
    which is imported at the top of this module.
    """

    # Backends required by the real implementation; read by DummyObject.
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    # NOTE(review): the original defined two classmethods with the same name
    # (the second shadowed the first); restored to the standard dummy-object
    # template names ``from_config`` / ``from_pretrained`` -- confirm.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    """Backend-missing placeholder (see module's dummy-object pattern).

    Fixes ``metaclass=__A`` (undefined) -> ``DummyObject`` and the duplicate
    classmethod name ``lowercase``; template names are assumed -- confirm.
    NOTE(review): several classes in this file share the obfuscated name
    ``_a``; the original distinct class names could not be recovered.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    """Backend-missing placeholder (see module's dummy-object pattern).

    Fixes ``metaclass=__A`` (undefined) -> ``DummyObject`` and the duplicate
    classmethod name ``lowercase``; template names are assumed -- confirm.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    """Backend-missing placeholder (see module's dummy-object pattern).

    Fixes ``metaclass=__A`` (undefined) -> ``DummyObject`` and the duplicate
    classmethod name ``lowercase``; template names are assumed -- confirm.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the ``accelerate config`` argument parser with its subcommands.

    Fixes the obfuscated ``def A`` (shadowed by a second ``def A`` below and
    called as ``get_config_parser`` in ``main``) and the collapsed local names.

    Args:
        subparsers: optional parent ``add_subparsers`` handle to attach to.
    Returns:
        argparse.ArgumentParser: the populated config parser.
    """
    # NOTE(review): add_help/allow_abbrev values were erased by obfuscation;
    # False/False matches the upstream accelerate CLI -- confirm.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
def main():
    """CLI entry point: parse args and dispatch to the selected subcommand.

    Fixes the obfuscated ``def A`` -- the ``__main__`` guard below calls
    ``main()``, which was previously undefined.
    """
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run the subcommand selected on the command line.
    args.func(args)


if __name__ == "__main__":
    main()
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D max pooling to a square matrix.

    Fixes the obfuscated original in which every local shared the name
    ``__snake_case`` (so ``arr``, the loop indices, and the output were all
    undefined) and which collided with the other pooling function's name.

    Args:
        arr: square 2D input (anything ``np.array`` accepts).
        size: side length of the pooling window.
        stride: step between successive windows.
    Returns:
        np.ndarray of shape ((n - size) // stride + 1,) ** 2 with the
        window maxima (float dtype from ``np.zeros``).
    Raises:
        ValueError: if the input is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")

    # Shape of the output matrix: windows that fit entirely inside the input.
    out_dim = (arr.shape[0] - size) // stride + 1
    pooled = np.zeros((out_dim, out_dim))

    for out_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for out_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            # Maximum of the current pooling window.
            pooled[out_i, out_j] = np.max(arr[i : i + size, j : j + size])

    return pooled
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling to a square matrix.

    Fixes the obfuscated original in which every local shared the name
    ``__snake_case`` and which collided with ``maxpooling``'s name.

    Args:
        arr: square 2D input (anything ``np.array`` accepts).
        size: side length of the pooling window.
        stride: step between successive windows.
    Returns:
        np.ndarray of shape ((n - size) // stride + 1,) ** 2 with the
        truncated (int()) window averages, as in the original.
    Raises:
        ValueError: if the input is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")

    # Shape of the output matrix: windows that fit entirely inside the input.
    out_dim = (arr.shape[0] - size) // stride + 1
    pooled = np.zeros((out_dim, out_dim))

    for out_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for out_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            # Truncated average of the current pooling window (matches the
            # original's int(np.average(...)) behavior).
            pooled[out_i, out_j] = int(np.average(arr[i : i + size, j : j + size]))

    return pooled
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image. Fixes the obfuscated original which bound the image
    # to a throwaway name and then read an undefined ``image``.
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 576 | import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Module configuration. Fixes the obfuscated original where both constants
# were bound to the same throwaway name while the functions below read
# MODEL_TYPE and LOAD_DENSE_INDEX.
MODEL_TYPE = "bart"  # which seq2seq answer model to load ("bart" or t5 fallback)
LOAD_DENSE_INDEX = True  # whether to load the dense retriever + faiss index
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question-retrieval and answer-generation models (Streamlit-cached).

    Fixes the obfuscated original where every local shared one name and the
    cache decorator referenced an undefined value.

    Returns:
        (qar_tokenizer, qar_model, sas_tokenizer, sas_model) -- the retrieval
        pair is (None, None) when LOAD_DENSE_INDEX is False.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        # NOTE(review): ``AutoModelForSeqaSeqLM`` is the name this file imports
        # (presumably an obfuscation of AutoModelForSeq2SeqLM) -- confirm the
        # import before renaming here.
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages, the dense faiss index, and the ES client.

    Fixes the obfuscated original where every local shared one name and the
    cache decorator referenced an undefined value.

    Returns:
        (wiki40b_passages, wiki40b_gpu_index_flat, es_client) -- the first two
        are None when LOAD_DENSE_INDEX is False.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and its precomputed question faiss index.

    Fixes the obfuscated original where every local shared one name and the
    cache decorator referenced an undefined value.

    Returns:
        (eli5_train, eli5_train_q_index)
    """
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Bind the loaded resources to the module-level names read by the helper
# functions below. Fixes the obfuscated original where every unpacking target
# was the same throwaway name, leaving the real globals undefined.
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the ELI5 training examples most similar to ``question``.

    Uses the dense question embedder + the precomputed faiss index over
    training questions. Fixes the obfuscated name collisions (this function
    is called as ``find_nearest_training`` later in the script).
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for ``question`` and build the QA input string.

    Args:
        question: the user question.
        source: "wiki40b" to retrieve, "none" for an empty support document.
        method: "dense" (faiss) or anything else for the sparse ES index.
        n_results: number of passages to retrieve.
    Returns:
        (question_doc, support_list) where question_doc is
        "question: ... context: ..." and support_list holds
        (article_title, section_title, score, passage_text) tuples.
    """
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate a long-form answer for ``question_doc`` with the seq2seq model.

    Keyword names match the call site in the Streamlit handler below.
    NOTE(review): the returned ``support_list`` is read from module scope
    (set in the button handler before this is called), as in the original.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar. Fixes the obfuscated original where both strings were bound
# to the same throwaway name while the format/markdown calls read
# ``header_html`` and ``header_full``.
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
# (bound to a real name: the markdown call below reads ``description``)
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
# Demo display options. Fixes the obfuscated original where every assignment
# target was the same throwaway name while the code below reads
# ``action_list``, ``action``, ``show_passages``.
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    # Defaults when the options box is unchecked: show everything.
    action = 3
    show_passages = True
# Retrieval options and generation defaults. Fixes the obfuscated original's
# collapsed assignment targets; the button handler below reads
# ``wiki_source``, ``index_type``, ``sampled``, ``n_beams``, ``min_len``,
# ``max_len``, ``top_p``, ``temp``.
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults (possibly overridden by the "Generation options" box).
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
# Generation options. Fixes the obfuscated original's collapsed assignment
# targets; overrides the defaults set above when the box is checked.
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
# Fixes the obfuscated original's collapsed assignment targets; the button
# handler below reads ``question``.
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
# Main handler: retrieve support, generate an answer, and render everything
# selected by ``action``. Fixes the obfuscated original's collapsed names
# (``support_list``, ``question_doc``, ``answer``, ...).
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, deduplicating, then keep 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        # Keep the first answer plus any answer scored above 2.
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
# Closing disclaimer (bound to a real name so the markdown call can read it).
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 576 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=13 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Dict=16 , lowerCAmelCase_ : str=[32, 64, 1_28] , lowerCAmelCase_ : str=[1, 2, 1] , lowerCAmelCase_ : int=[2, 2, 4] , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=2.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : int=1e-5 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : Dict=8 , lowerCAmelCase_ : Any=["stage1", "stage2"] , lowerCAmelCase_ : Union[str, Any]=[1, 2] , ) -> List[str]:
'''simple docstring'''
A__ : int =parent
A__ : Optional[Any] =batch_size
A__ : List[Any] =image_size
A__ : List[Any] =patch_size
A__ : Tuple =num_channels
A__ : Union[str, Any] =embed_dim
A__ : Any =hidden_sizes
A__ : List[str] =depths
A__ : str =num_heads
A__ : Union[str, Any] =window_size
A__ : Optional[Any] =mlp_ratio
A__ : str =qkv_bias
A__ : Optional[Any] =hidden_dropout_prob
A__ : Tuple =attention_probs_dropout_prob
A__ : str =drop_path_rate
A__ : List[Any] =hidden_act
A__ : Union[str, Any] =use_absolute_embeddings
A__ : List[str] =patch_norm
A__ : Dict =layer_norm_eps
A__ : List[str] =initializer_range
A__ : int =is_training
A__ : List[str] =scope
A__ : Tuple =use_labels
A__ : Optional[Any] =type_sequence_label_size
A__ : Any =encoder_stride
A__ : Any =out_features
A__ : Any =out_indices
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Union[str, Any] =None
if self.use_labels:
A__ : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[Any] =self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
A__ : Dict =FocalNetModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : Union[str, Any] =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A__ : List[str] =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =FocalNetBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =model(lowerCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
A__ : List[str] =None
A__ : Any =FocalNetBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> Any:
'''simple docstring'''
A__ : Tuple =FocalNetForMaskedImageModeling(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[Any] =model(lowerCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ : Tuple =1
A__ : Any =FocalNetForMaskedImageModeling(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : List[str] =model(lowerCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any:
'''simple docstring'''
A__ : Tuple =self.type_sequence_label_size
A__ : Optional[Any] =FocalNetForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : int =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ : Any =1
A__ : Tuple =FocalNetForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : Union[str, Any] =model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
A__ , A__ , A__ : Union[str, Any] =config_and_inputs
A__ : Dict ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__snake_case = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =FocalNetModelTester(self )
A__ : Union[str, Any] =ConfigTester(self , config_class=lowerCAmelCase_ , embed_dim=37 , has_text_modality=lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
return
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
A__ : Optional[Any] =model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : Optional[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
A__ : Dict =model_class(lowerCAmelCase_ )
A__ : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Union[str, Any] =[*signature.parameters.keys()]
A__ : List[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase__ ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
A__ : Dict =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
A__ : List[Any] =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
A__ : Optional[int] =outputs.hidden_states
A__ : Any =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# FocalNet has a different seq_length
A__ : Optional[Any] =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A__ : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A__ : str =outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
A__ , A__ , A__ , A__ : Optional[Any] =reshaped_hidden_states[0].shape
A__ : Optional[int] =(
reshaped_hidden_states[0].view(lowerCAmelCase_ , lowerCAmelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[str] =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
A__ : Any =True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Tuple =True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ , A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[Any] =3
A__ : Optional[Any] =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A__ : int =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A__ : Tuple =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A__ : Union[str, Any] =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
A__ : Any =True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Any =True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
@slow
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[int] =FocalNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ , A__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
A__ : str =_config_zero_init(lowerCAmelCase_ )
for model_class in self.all_model_classes:
A__ : List[Any] =model_class(config=lowerCAmelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """Integration test: run the pretrained focalnet-tiny classifier on a COCO fixture image."""

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        # Name is required: the test below reads self.default_image_processor.
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertTrue(x, msg) never checks equality against 281 -- use assertEqual.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (FocalNetBackbone,) if is_torch_available() else ()
__snake_case = FocalNetConfig
__snake_case = False
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =FocalNetModelTester(self )
| 687 |
"""Lazy-loading package init for the (deprecated) Trajectory Transformer model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed by _LazyModule below.
# The name `_import_structure` is required: it is passed to _LazyModule.
_import_structure = {
    'configuration_trajectory_transformer': [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TrajectoryTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#

# Flat key renames between the original Stable Diffusion layout and the
# HF Diffusers layout. The list names below are required: they are read by
# convert_unet_state_dict and appended to by the loops that follow.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

# Substring renames applied only inside resnet blocks.
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet state-dict keys to the original Stable Diffusion names.

    Name is required: the __main__ block calls convert_unet_state_dict(...).
    """
    # mapping: HF key -> SD key, starting from the identity.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#

# Names are required: convert_vae_state_dict reads vae_conversion_map and
# vae_conversion_map_attn, and the loops below append to the former.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# Substring renames applied only to attention layers.
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Append two singleton dims so a Linear weight matches SD's 1x1-conv attention weights.

    Renamed parameter: the body referenced `w` while the parameter had a
    different name, which raised NameError. The function name is required:
    convert_vae_state_dict calls reshape_weight_for_sd(...).
    """
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE state-dict keys to original Stable Diffusion names.

    Mid-block attention weights are also reshaped to SD's 1x1-conv layout.
    Name is required: the __main__ block calls convert_vae_state_dict(...).
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'mid.attn_1.{weight_name}.weight' in k:
                print(f'Reshaping {k} for SD format')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
# HF substring (escaped) -> SD substring; applied in one pass via the regex below.
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x (OpenCLIP) HF text-encoder state dict to the SD layout.

    Separate q/k/v projection weights and biases are captured per layer and
    concatenated into single in_proj tensors, as the original checkpoint stores
    them. All other keys are renamed with the textenc_pattern regex.

    Name is required: the __main__ block calls convert_text_enc_state_dict_vaa(...).
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            # single character 'q' / 'k' / 'v' at a fixed offset from the end
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        # fix: the lambda parameter must be the match object used in the body
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """v1 (CLIP) text encoders already use the original layout; return unchanged.

    Name is required: the __main__ block calls convert_text_enc_state_dict(...).
    """
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 193 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Names are required: `logger` and the *_FOR_DOC / expected-output constants are
# read throughout the rest of this module (log calls and docstring decorators).
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = {}
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ = model.mobilenet_va
else:
lowerCAmelCase__ = model
lowerCAmelCase__ = 'MobilenetV1/Conv2d_0/'
lowerCAmelCase__ = backbone.conv_stem.convolution.weight
lowerCAmelCase__ = backbone.conv_stem.normalization.bias
lowerCAmelCase__ = backbone.conv_stem.normalization.weight
lowerCAmelCase__ = backbone.conv_stem.normalization.running_mean
lowerCAmelCase__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
lowerCAmelCase__ = i + 1
lowerCAmelCase__ = i * 2
lowerCAmelCase__ = backbone.layer[pt_index]
lowerCAmelCase__ = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
lowerCAmelCase__ = pointer.convolution.weight
lowerCAmelCase__ = pointer.normalization.bias
lowerCAmelCase__ = pointer.normalization.weight
lowerCAmelCase__ = pointer.normalization.running_mean
lowerCAmelCase__ = pointer.normalization.running_var
lowerCAmelCase__ = backbone.layer[pt_index + 1]
lowerCAmelCase__ = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
lowerCAmelCase__ = pointer.convolution.weight
lowerCAmelCase__ = pointer.normalization.bias
lowerCAmelCase__ = pointer.normalization.weight
lowerCAmelCase__ = pointer.normalization.running_mean
lowerCAmelCase__ = pointer.normalization.running_var
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
lowerCAmelCase__ = model.classifier.weight
lowerCAmelCase__ = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TF MobileNetV1 checkpoint weights into a PyTorch model in place.

    Fixes duplicate parameter names (SyntaxError) and restores the `tf_weights`
    dict / `array` locals that had been collapsed to throwaway assignments.
    Name is required: the PreTrainedModel class attribute references
    load_tf_weights_in_mobilenet_va.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.')
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'Importing {name}')
        if name not in tf_weights:
            logger.info(f'{name} not in tf pre-trained weights, skipping')
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            # TF depthwise layout (H, W, in, multiplier) -> PyTorch (in, multiplier, H, W)
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')

        logger.info(f'Initialize PyTorch weight {name} {array.shape}')
        pointer.data = torch.from_numpy(array)

        # drop the variable and its optimizer/EMA shadows so the summary below
        # only lists genuinely unused weights
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/RMSProp', None)
        tf_weights.pop(name + '/RMSProp_1', None)
        tf_weights.pop(name + '/ExponentialMovingAverage', None)

    logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}')
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Pad `features` like TensorFlow's "SAME" convolution padding would.

    TF pads asymmetrically (extra pixel goes right/bottom) depending on how the
    input size relates to the stride; PyTorch's built-in symmetric padding cannot
    reproduce that, so the padding is computed and applied explicitly here.
    Name is required: the conv layer's forward calls apply_tf_padding(...).
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, 'constant', 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Conv2d + optional BatchNorm2d + optional activation: the MobileNetV1 building block.

    Renamed from `__snake_case`: the model below instantiates MobileNetVaConvLayer,
    and every class in this module shared the old name. Also fixes the nonexistent
    `nn.Convad`/`nn.BatchNormad` attributes, duplicate parameter names, and the
    degenerate `isinstance(x, x)` activation check.
    """

    def __init__(self, config, in_channels, out_channels, kernel_size, stride=1, groups=1,
                 bias=False, use_normalization=True, use_activation=True):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
        if out_channels % groups != 0:
            raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')

        # With TF-style "SAME" padding, padding is applied dynamically in forward().
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode='zeros',
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features):
        # Renamed to `forward` so nn.Module's __call__ dispatches here.
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and pretrained loading for MobileNetV1.

    Restores the distinct class-attribute names (they were all `SCREAMING_SNAKE_CASE__`,
    so each assignment clobbered the previous one) and the `PreTrainedModel` base,
    which is imported at the top of this module.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = 'mobilenet_v1'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize dense/conv weights from N(0, initializer_range); batchnorm to identity."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Renamed: both docstrings were bound to the same `_lowerCAmelCase` name, so the
# second assignment clobbered the first; the model decorators need both.
MOBILENET_V1_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
MOBILENET_V1_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone: conv stem + 13 depthwise/pointwise pairs + optional pooling.

    Renamed from `__snake_case`: the classification head instantiates
    MobileNetVaModel by this name. Restores the `depth`/`out_channels` locals
    and the forward() bookkeeping that the obfuscation collapsed.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 (groups == channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # ... followed by pointwise 1x1
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone + dropout + linear classification head.

    Restores the `self.dropout`/`self.classifier`/`self.num_labels` attributes
    (the obfuscation assigned them to throwaway locals, so forward() crashed)
    and the named locals in the loss computation.
    """

    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/num_labels, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 193 | 1 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from ``position`` that stay on an n x n board.

    Fix: the original def was named ``__UpperCAmelCase`` while the caller in
    this module invokes ``get_valid_pos`` (four sibling functions shared the
    mangled name and shadowed each other).
    """
    y, x = position
    # The eight L-shaped knight offsets.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Return True when every cell of the board has been visited (non-zero).

    Fix: renamed from the mangled ``__UpperCAmelCase`` to the name the solver
    actually calls.
    """
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from ``pos`` at step ``curr``.

    Mutates ``board`` in place; cells hold the 1-based visit order.
    Fix: renamed from the mangled ``__UpperCAmelCase`` to the name used by the
    recursive call sites.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Backtrack: undo the tentative move.
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start cell.

    Returns a board whose entries give the visit order (1..n*n).
    Raises ValueError when no tour exists for this board size.
    Fix: renamed from the mangled ``__UpperCAmelCase``.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    # Message text kept byte-identical to the original (including the typo).
    msg = f'Open Kight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Module-level logging setup for the conversion script.
logging.set_verbosity_info()
# NOTE(review): assignment target was mangled; presumably this should be
# named `logger` — confirm against the upstream conversion script.
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build the DPTConfig (and expected output shape) for a checkpoint URL.

    Fix: the def was named ``__UpperCAmelCase`` while ``convert_dpt_checkpoint``
    calls ``get_dpt_config``; config attribute targets were dropped by the
    mangling and are restored here (names follow DPTConfig — confirm against
    transformers if a different DPT variant is added).
    """
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    # NOTE(review): `expected_shape` is undefined for URLs matching neither
    # "large" nor "ade" — pre-existing behavior, kept as-is.
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop timm head weights that have no counterpart in the HF model.

    Fixes: renamed from the mangled ``__UpperCAmelCase``; the mangled
    ``state_dict.pop(k, k)`` is restored to ``pop(k, None)`` so a missing key
    is silently ignored instead of returning the key itself.
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Map an original DPT/timm state-dict key to its HF-transformers name.

    Fix: the signature's parameter was mangled to ``lowerCamelCase_`` while the
    whole body reads ``name`` (NameError); the def name is restored so the
    conversion loop can call it. Replacement chain kept byte-identical.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head')
    if "scratch" in name:
        name = name.replace('scratch' , 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt')
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm')
    if "head" in name:
        name = name.replace('head' , 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head')
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused timm qkv matrix into separate HF query/key/value entries.

    Fix: renamed from the mangled ``__UpperCAmelCase``; the state-dict
    assignment targets were dropped by the mangling and are restored to the
    standard ``attention.attention.{query,key,value}`` keys.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO test image used to verify conversions.

    Fix: renamed from the mangled ``__UpperCAmelCase``; ``stream=True`` restored
    (it had been mangled into an undefined name).
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint to the HF format, verify it on a
    test image, save it, and optionally push it to the hub.

    Fixes: renamed from the mangled ``a__``-style name to match the call in
    ``__main__``; the key-rename loop had lost its assignment — it popped each
    key and discarded the value, so the converted state dict ended up empty.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    # NOTE(review): the parser is assigned to a mangled name (`lowerCAmelCase__`)
    # but referenced as `parser` above and `args` below — confirm the intended
    # names against the upstream script.
    lowerCAmelCase__ = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 1 |
"""simple docstring"""
def factorial(digit: int) -> int:
    """Return digit! computed recursively (0! == 1! == 1).

    Fix: the def was named ``__snake_case`` while the body (and the sibling
    Krishnamurthy check) calls ``factorial``.
    """
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    """Return True if ``number`` equals the sum of the factorials of its digits
    (e.g. 145 = 1! + 4! + 5!).

    Fix: the def was named ``__snake_case`` while ``__main__`` calls
    ``krishnamurthy``. Uses ``math.factorial`` so the check is self-contained.
    """
    import math  # local import keeps this function self-contained

    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += math.factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    # Interactive check: read a number and report whether it is a
    # Krishnamurthy number.
    print('''Program to check whether a number is a Krisnamurthy Number or not.''')
    # Fix: the original assigned the input to `_snake_case` but the f-string
    # below reads `number`, raising NameError at runtime.
    number = int(input('''Enter number: ''').strip())
    print(
        f'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
    )
| 580 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class A ( nn.Module ):
    """Flax 2x nearest-neighbour upsampling followed by a 3x3 conv.

    Fixes: the dataclass field had lost its name (the body reads
    ``self.out_channels``); ``setup`` had been renamed and its submodule
    assignment discarded into a local, so ``self.conv`` was never created;
    ``jnp.floataa`` restored to ``jnp.float32``.
    NOTE(review): three classes in this module are all named ``A`` (mangled
    originals); later definitions shadow earlier ones at module scope.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Double the spatial resolution, then refine with the conv.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class A ( nn.Module ):
    """Flax 2x downsampling via a strided 3x3 conv.

    Fixes: field name restored from the body's ``self.out_channels``
    reference; ``setup`` renamed back and its conv assigned to ``self.conv``
    (the original discarded it into a local); ``jnp.floataa`` -> ``jnp.float32``.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class A ( nn.Module ):
    """Flax ResNet block with a timestep-embedding injection.

    Fixes: the five dataclass fields all shared one mangled name (only the
    last survived) — restored from the ``self.in_channels`` /
    ``self.out_channels`` / ``self.dropout_prob`` / ``self.use_nin_shortcut``
    references in the body; ``setup`` assignments were discarded into locals
    so no submodule was ever attached; ``__call__`` declared three parameters
    with the same name (a SyntaxError) — restored to
    ``(hidden_states, temb, deterministic=True)``.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norma = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conva = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.normb = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.convb = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 conv matches channel counts for the residual connection.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norma(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conva(hidden_states)
        # Project the (swished) timestep embedding and broadcast over H, W.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.normb(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.convb(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 325 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both globals were assigned to the same mangled name `lowercase_`,
# so the logger was clobbered by the (empty) archive map.
logger = logging.get_logger(__name__)
# NOTE(review): name reconstructed from the transformers convention for
# config modules — confirm against upstream.
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class _snake_case (PretrainedConfig):
    """Configuration class for a LLaMA-style model.

    Fixes: the base class had been mangled to the undefined name
    ``lowercase__`` (restored to ``PretrainedConfig``, imported at the top of
    this file); ``__init__``'s parameters all shared the name ``__lowercase``
    (a SyntaxError) — names restored from the body's own right-hand sides;
    the ``self.`` assignment targets had been dropped; the rope-scaling error
    message claimed a ``name`` field while the code reads ``"type"``.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=3_2000,
        hidden_size=4096,
        intermediate_size=1_1008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate that `rope_scaling` is None or a {"type", "factor"} dict."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 37 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding for the RoBERTa-PreLayerNorm subpackage.
# Fixes: the mangling replaced every assignment target with `lowercase_`,
# so the optional-backend entries were never added to `_import_structure`
# and the `_LazyModule` instance was never installed into `sys.modules`,
# which is what makes the lazy imports work.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Direct imports for type-checkers only; at runtime the lazy module below
    # resolves these on first access.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowercase_ = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract target warnings from one artifact (a zip, or a directory when
    the module-level ``from_gh`` flag is set).

    Fixes: the def was named ``a__`` while ``extract_warnings`` calls this
    name; locals and the ``isinstance(line, bytes)`` check were reconstructed
    from the body.

    Args:
        artifact_path: path to a zip file (or a directory when ``from_gh``).
        targets: warning class names (e.g. ``"DeprecationWarning"``) to keep.
    Returns:
        A set of the selected multi-line warning strings.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' '):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '\n'.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # NOTE(review): `from_gh` is a module-level flag set in `__main__`, not a
    # parameter — confirm callers always define it before calling.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract target warnings from every artifact in ``artifact_dir``.

    Fix: renamed from the mangled ``a__`` to the name used in ``__main__``.
    Zip files are always scanned; non-zip entries only when the module-level
    ``from_gh`` flag is truthy.
    """
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('.zip') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":

    def list_str(values):
        """Split a comma-separated CLI string into a list.

        Fix: the def was mangled to ``a__`` while ``type=list_str`` below
        references this name (a NameError at argument parsing time).
        """
        return values.split(',')

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )
    args = parser.parse_args()
    # `from_gh` is read as a module-level flag by the extraction functions.
    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)
    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 74 |
'''simple docstring'''
def valid_connection(graph, next_ver, curr_ind, path) -> bool:
    """Check that ``next_ver`` can extend the partial Hamiltonian path.

    Fix: renamed from the mangled ``lowercase_`` to the name the solver calls.
    1. The previous vertex on the path must be adjacent to ``next_ver``.
    2. ``next_ver`` must not already be on the path.
    """
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph, path, curr_ind) -> bool:
    """Backtracking step: fill ``path[curr_ind:]`` with a Hamiltonian cycle.

    Mutates ``path`` in place; returns True when a full cycle is found.
    Fix: renamed from the mangled ``lowercase_`` to the name used by the
    recursive call and by ``hamilton_cycle``.
    """
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph, start_index=0) -> list[int]:
    """Return a Hamiltonian cycle starting at ``start_index``, or [] if none.

    Fixes: renamed from the mangled ``lowercase_``; the lost assignment is
    restored so both endpoints of the path are seeded with the start vertex.
    """
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 422 | 0 |
import warnings
from functools import wraps
from typing import Callable
def lowerCAmelCase_ ( A_):
@wraps(A_)
def _inner_fn(*A_ ,**A_):
warnings.warn(
(F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") ,A_ ,)
return fn(*A_ ,**A_)
return _inner_fn
| 221 |
def lowerCAmelCase_(principal, rate_per_annum, years_to_repay):
    """Return the equal monthly installment (EMI) for a loan.

    Fixes: the three parameters all shared the name ``A_`` (a SyntaxError) —
    names restored from the body's own references; ``isinstance(A_, A_)`` is
    restored to ``isinstance(years_to_repay, int)``.

    Raises:
        Exception: on non-positive principal, negative rate, or a
            non-positive / non-integer repayment term.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 221 | 1 |
"""simple docstring"""
def a__(numerator: int = 1, digit: int = 10_00) -> int:
    """Find the denominator <= ``digit`` whose decimal expansion of
    numerator/denominator accumulates the most distinct remainders before the
    first repeat (Project Euler 26 style search).

    Fix: both parameters were named ``lowerCAmelCase`` (a SyntaxError) —
    names restored from the body's references to ``numerator`` and ``digit``.
    Returns 0 when no remainder ever repeats within the iteration budget.
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        # At most `digit` long-division steps per denominator.
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
# Tests
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 182 |
"""simple docstring"""
def a__(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative ints as a "0b..." string.

    Fix: both parameters were named ``lowerCAmelCase`` (a SyntaxError) —
    names restored from the body's references to ``a`` and ``b``.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # XOR digit-by-digit over zero-padded binary strings.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 182 | 1 |
import math
def fx(x, a):
    """f(x) = x^2 - a; its root is sqrt(a).

    Fix: renamed from the mangled ``__lowercase`` to the name the Newton
    iteration calls.
    """
    return math.pow(x, 2) - a
def fx_derivative(x):
    """Derivative of f(x) = x^2 - a, i.e. 2x.

    Fix: renamed from the mangled ``__lowercase`` to the name the Newton
    iteration calls.
    """
    return 2 * x
def __lowercase ( a ):
    """Return a starting point >= sqrt(a) by repeatedly squaring 2.0.

    NOTE(review): the original assigned to an obfuscated local while the loop
    read undefined `start`/`a`; reconstructed here.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def __lowercase ( a , max_iter = 9_999 , tolerance = 0.00000000000001 ):
    """Approximate sqrt(a) with Newton-Raphson iteration on f(x) = x**2 - a.

    Args:
        a: non-negative number whose square root is sought.
        max_iter: maximum number of Newton steps.
        tolerance: stop once successive iterates differ by less than this.

    Raises:
        ValueError: if a is negative.

    NOTE(review): the original bound all three parameters to one obfuscated
    name (a SyntaxError) and called helper names (`fx`, `fx_derivative`,
    `get_initial_point`) that do not exist in this module; the helper logic
    is inlined here so the function stands on its own.
    """
    if a < 0:
        raise ValueError('math domain error' )
    # Initial guess: repeatedly square 2.0 until it reaches at least a, so
    # the iteration starts above the root.
    value = 2.0
    while value <= a:
        value = math.pow(value , 2 )
    for _ in range(max_iter ):
        prev_value = value
        # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        value = value - (math.pow(value , 2 ) - a) / (2 * value)
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 102 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Module-level logger for this training script.
lowerCamelCase = logging.getLogger(__name__)
# NOTE(review): all three globals were renamed to the same identifier by
# obfuscation, so each assignment shadows the previous one, and the last line
# reads the undefined name MODEL_CONFIG_CLASSES. Left byte-identical.
lowerCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowerCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case_ :
    """Arguments for which model/config/tokenizer to fine-tune or train from scratch.

    NOTE(review): obfuscation renamed every field to `__UpperCAmelCase` and its
    defaults to the undefined placeholder `_a`, so this dataclass cannot be
    evaluated as written; the code is left byte-identical and only documented.
    Judging by the help strings, the original fields were: model_name_or_path,
    model_type, config_overrides, config_name, tokenizer_name, cache_dir,
    use_fast_tokenizer, model_revision, use_auth_token.
    """
    # presumably model_name_or_path — checkpoint to initialize weights from
    __UpperCAmelCase =field(
        default=_a , metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        } , )
    # presumably model_type — architecture to use when training from scratch
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_a )} , )
    # presumably config_overrides — ad-hoc config overrides for from-scratch training
    __UpperCAmelCase =field(
        default=_a , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    # presumably config_name
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    # presumably tokenizer_name
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    # presumably cache_dir
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    # presumably use_fast_tokenizer
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    # presumably model_revision
    __UpperCAmelCase =field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    # presumably use_auth_token
    __UpperCAmelCase =field(
        default=_a , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    def A__ ( self ):
        """Validate mutually exclusive options after dataclass init."""
        # --config_overrides cannot be combined with --config_name or
        # --model_name_or_path, since both would define the model config.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class snake_case_ :
    """Arguments for which data to train and evaluate a whole-word-masking MLM on.

    NOTE(review): obfuscation renamed every field to `__UpperCAmelCase` and most
    defaults to the undefined placeholder `_a`; the dataclass cannot be
    evaluated as written and is left byte-identical. By help string, the
    original fields were: dataset_name, dataset_config_name, train_file,
    validation_file, train_ref_file, validation_ref_file, overwrite_cache,
    validation_split_percentage, max_seq_length, preprocessing_num_workers,
    mlm_probability, pad_to_max_length.
    """
    # presumably dataset_name
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    # presumably dataset_config_name
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    # presumably train_file
    __UpperCAmelCase =field(default=_a , metadata={"""help""": """The input training data file (a text file)."""} )
    # presumably validation_file
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    # presumably train_ref_file — Chinese whole-word-masking references for training
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
    # presumably validation_ref_file
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
    # presumably overwrite_cache
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    # presumably validation_split_percentage
    __UpperCAmelCase =field(
        default=5 , metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        } , )
    # presumably max_seq_length
    __UpperCAmelCase =field(
        default=_a , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated. Default to the max input length of the model."""
            )
        } , )
    # presumably preprocessing_num_workers
    __UpperCAmelCase =field(
        default=_a , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    # presumably mlm_probability
    __UpperCAmelCase =field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    # presumably pad_to_max_length
    __UpperCAmelCase =field(
        default=_a , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    def A__ ( self ):
        """Validate that provided data files have a supported extension."""
        if self.train_file is not None:
            __lowerCAmelCase = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            __lowerCAmelCase = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __lowercase ( dataset , ref_file ):
    """Attach whole-word-masking reference data to a datasets.Dataset.

    Reads one JSON reference entry per non-blank line of ``ref_file`` and
    returns a new Dataset with the original columns plus a ``chinese_ref``
    column holding those entries.

    NOTE(review): the original bound both parameters to one obfuscated name
    (a SyntaxError) and dropped the assignment into the ``chinese_ref`` key;
    both are reconstructed here.
    """
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    # One reference entry is expected per dataset row.
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
def __lowercase ( ):
    """Entry point for whole-word-masking masked-LM training (run_mlm_wwm style).

    Parses model/data/training arguments, loads and tokenizes the datasets,
    optionally attaches Chinese whole-word-masking reference files, then
    trains and/or evaluates an AutoModelForMaskedLM via the HF Trainer and
    writes train/eval metric files into the output directory.

    NOTE(review): obfuscation bound every local to `__lowerCAmelCase` while
    later lines read the intended names (`parser`, `training_args`,
    `datasets`, `tokenizer`, ...), so this function cannot run as written.
    It is left byte-identical and only documented.
    """
    __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    __lowerCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , UpperCAmelCase__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        __lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the train split by percentage.
            __lowerCAmelCase = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
            __lowerCAmelCase = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
    else:
        __lowerCAmelCase = {}
        if data_args.train_file is not None:
            __lowerCAmelCase = data_args.train_file
        if data_args.validation_file is not None:
            __lowerCAmelCase = data_args.validation_file
        __lowerCAmelCase = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            __lowerCAmelCase = 'text'
        __lowerCAmelCase = load_dataset(UpperCAmelCase__ , data_files=UpperCAmelCase__ )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __lowerCAmelCase = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        __lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        __lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        __lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.config_overrides is not None:
        logger.info(F"""Overriding config: {model_args.config_overrides}""" )
        config.update_from_string(model_args.config_overrides )
        logger.info(F"""New config: {config}""" )
    __lowerCAmelCase = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCAmelCase__ )
    elif model_args.model_name_or_path:
        __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        __lowerCAmelCase = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        __lowerCAmelCase = AutoModelForMaskedLM.from_config(UpperCAmelCase__ )
    model.resize_token_embeddings(len(UpperCAmelCase__ ) )
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        __lowerCAmelCase = datasets['train'].column_names
    else:
        __lowerCAmelCase = datasets['validation'].column_names
    __lowerCAmelCase = 'text' if 'text' in column_names else column_names[0]
    __lowerCAmelCase = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(UpperCAmelCase__ ):
        # Remove empty lines
        __lowerCAmelCase = [line for line in examples['text'] if len(UpperCAmelCase__ ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=data_args.max_seq_length )
    __lowerCAmelCase = datasets.map(
        UpperCAmelCase__ , batched=UpperCAmelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        __lowerCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        __lowerCAmelCase = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    __lowerCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        __lowerCAmelCase = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    __lowerCAmelCase = DataCollatorForWholeWordMask(tokenizer=UpperCAmelCase__ , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    __lowerCAmelCase = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            __lowerCAmelCase = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            __lowerCAmelCase = model_args.model_name_or_path
        else:
            __lowerCAmelCase = None
        __lowerCAmelCase = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
        trainer.save_model() # Saves the tokenizer too for easy upload
        __lowerCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , 'w' ) as writer:
                logger.info('***** Train results *****' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F""" {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
    # Evaluation
    __lowerCAmelCase = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        __lowerCAmelCase = trainer.evaluate()
        __lowerCAmelCase = math.exp(eval_output['eval_loss'] )
        __lowerCAmelCase = perplexity
        __lowerCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(F""" {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )
    return results
def __lowercase ( UpperCAmelCase__ ):
    """Spawn-helper entry point (e.g. for TPU `xla_spawn`); the index argument is unused.

    NOTE(review): `main` is not defined in this obfuscated module (the real
    entry point was renamed), so this call would raise NameError at runtime —
    confirm against the original script.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this obfuscated module (the entry
    # point was renamed above) — this guard would raise NameError if executed.
    main()
| 102 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__UpperCamelCase : Dict = logging.get_logger(__name__)
# Map of pretrained ViT-MAE checkpoint names to their config files on the Hub.
# NOTE(review): both globals share one obfuscated base name, so the second
# assignment shadows the first under a different annotation.
__UpperCamelCase : Tuple = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a ( a__ ):
    """Configuration for a ViT-MAE style model (masked autoencoder ViT).

    Stores encoder hyperparameters (hidden size, layers, heads, MLP size,
    activation, dropouts), patch/image geometry, and decoder hyperparameters
    plus the masking ratio used for MAE pre-training.

    NOTE(review): the original ``__init__`` bound every parameter to the same
    obfuscated name (a SyntaxError) and assigned to throwaway locals instead
    of ``self``; parameter names and attribute assignments are reconstructed
    from the evident ViT-MAE layout.
    """

    snake_case__ = '''vit_mae'''

    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        # Encoder hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch / image geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder hyperparameters for MAE reconstruction.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 4 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCamelCase__ ( __lowerCamelCase : List[str] ) -> str:
    """Return the sha256 hex digest of python source lines, ignoring comments
    and lines that become empty once comments are stripped.

    NOTE(review): the original iterated an undefined name ``lines``, applied
    ``re.sub`` to the whole list, and depended on a broken ``shaaaa`` import;
    all three are fixed here (``sha256`` is imported locally to keep the
    function self-contained).
    """
    from hashlib import sha256

    filtered_lines = []
    for line in __lowerCamelCase:
        line = re.sub(R'#.*' , '' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): `_hash_python_lines` is not defined in this obfuscated module
# (the hashing helper above was renamed), so evaluating these tables as
# written would raise NameError. Left byte-identical.
lowercase ={
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowercase ={
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
# Image/audio folder loaders claim their media extensions (both cases).
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowercase ={'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowercase ={}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
# Folder-based loaders additionally accept zipped archives.
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 446 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the installed torch predates 1.11; selects legacy torch.onnx.export kwargs below.
_A = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowercase_ ( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) -> None:
    """Export a torch module to an ONNX file at ``output_path``.

    Creates the parent directory, then calls ``torch.onnx.export`` with the
    keyword set appropriate for the installed torch version.

    NOTE(review): the original bound all eight parameters to one obfuscated
    name (a SyntaxError); parameter names and the boolean keyword values are
    reconstructed from the evident intent.
    """
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def lowercase_ ( model_path , output_path , opset , fpaa = False ) -> None:
    """Convert a diffusers checkpoint's VAE decoder to ONNX.

    Loads the AutoencoderKL from ``model_path + "/vae"``, routes ``forward``
    through the decoder only, and exports it to
    ``output_path/vae_decoder/model.onnx``. fp16 export requires CUDA.

    NOTE(review): the original bound all parameters to one obfuscated name
    (a SyntaxError), used ``torch.floataa`` for both dtype branches, and lost
    the ``forward = decode`` monkey-patch; reconstructed here. The
    ``onnx_export`` helper name is kept as in the source — confirm it resolves
    in the full module.
    """
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        device = "cpu"
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    # Build the CLI for the VAE-to-ONNX conversion script.
    # NOTE(review): obfuscation assigned the parser and parsed args to `_A`
    # while later lines read `parser`/`args`, and `convert_models` is the
    # renamed function above — left byte-identical.
    _A = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    _A = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
    print("SD: Done: ONNX")
| 705 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase_ ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
if isinstance(A__ , torch.Tensor ):
return image
elif isinstance(A__ , PIL.Image.Image ):
snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
snake_case = np.concatenate(A__ , axis=0 )
snake_case = np.array(A__ ).astype(np.floataa ) / 255.0
snake_case = image.transpose(0 , 3 , 1 , 2 )
snake_case = 2.0 * image - 1.0
snake_case = torch.from_numpy(A__ )
elif isinstance(image[0] , torch.Tensor ):
snake_case = torch.cat(A__ , dim=0 )
return image
def lowercase_ ( t , va , vb , DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation between two vectors at fraction ``t``.

    Torch inputs are moved to numpy and back; near-parallel vectors
    (|cos| > DOT_THRESHOLD) fall back to plain linear interpolation.

    NOTE(review): the original bound its parameters to one obfuscated name
    (a SyntaxError) and never initialized ``inputs_are_torch`` on the
    numpy-input path, raising NameError; both are fixed here.
    """
    inputs_are_torch = False
    if not isinstance(va , np.ndarray ):
        inputs_are_torch = True
        input_device = va.device
        va = va.cpu().numpy()
        vb = vb.cpu().numpy()
    dot = np.sum(va * vb / (np.linalg.norm(va ) * np.linalg.norm(vb )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # Vectors are nearly parallel: lerp is numerically safer.
        va = (1 - t) * va + t * vb
    else:
        theta_a = np.arccos(dot )
        sin_theta_a = np.sin(theta_a )
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t )
        sa = np.sin(theta_a - theta_t ) / sin_theta_a
        sa_t = sin_theta_t / sin_theta_a
        va = sa * va + sa_t * vb
    if inputs_are_torch:
        va = torch.from_numpy(va ).to(input_device )
    return va
def lowercase_ ( x , y ):
    """Squared spherical distance between unit-normalized x and y (last dim).

    Equals (2 * arcsin(||x - y|| / 2))^2 / 2 per element after L2
    normalization; used as a CLIP-guidance loss.

    NOTE(review): the original bound both parameters to one obfuscated name
    (a SyntaxError); names are reconstructed here.
    """
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowercase_ ( model , value ):
    """Set ``requires_grad`` on every parameter of ``model`` to ``value``.

    NOTE(review): the original bound both parameters to one obfuscated name
    (a SyntaxError); names are reconstructed here.
    """
    for param in model.parameters():
        param.requires_grad = value
class lowerCamelCase ( A_ ):
def __init__(self : str , _A : AutoencoderKL , _A : CLIPTextModel , _A : CLIPModel , _A : CLIPTokenizer , _A : UNetaDConditionModel , _A : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _A : CLIPFeatureExtractor , _A : List[Any]=None , _A : Tuple=None , _A : Union[str, Any]=None , ) -> Dict:
super().__init__()
self.register_modules(
vae=_A , text_encoder=_A , clip_model=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , coca_model=_A , coca_tokenizer=_A , coca_transform=_A , )
snake_case = (
feature_extractor.size
if isinstance(feature_extractor.size , _A )
else feature_extractor.size["shortest_edge"]
)
snake_case = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _A )
set_requires_grad(self.clip_model , _A )
def UpperCAmelCase(self : List[Any] , _A : Optional[Union[str, int]] = "auto" ) -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def UpperCAmelCase(self : str ) -> Any:
self.enable_attention_slicing(_A )
def UpperCAmelCase(self : List[Any] ) -> int:
set_requires_grad(self.vae , _A )
def UpperCAmelCase(self : Any ) -> Union[str, Any]:
set_requires_grad(self.vae , _A )
def UpperCAmelCase(self : Optional[Any] ) -> int:
set_requires_grad(self.unet , _A )
def UpperCAmelCase(self : str ) -> Tuple:
set_requires_grad(self.unet , _A )
def UpperCAmelCase(self : Tuple , _A : str , _A : str , _A : List[str] ) -> Dict:
# get the original timestep using init_timestep
snake_case = min(int(num_inference_steps * strength ) , _A )
snake_case = max(num_inference_steps - init_timestep , 0 )
snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase(self : List[Any] , _A : Dict , _A : List[Any] , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=None ) -> str:
if not isinstance(_A , torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(_A )}' )
snake_case = image.to(device=_A , dtype=_A )
if isinstance(_A , _A ):
snake_case = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
snake_case = torch.cat(_A , dim=0 )
else:
snake_case = self.vae.encode(_A ).latent_dist.sample(_A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 0.1_82_15 * init_latents
snake_case = init_latents.repeat_interleave(_A , dim=0 )
snake_case = randn_tensor(init_latents.shape , generator=_A , device=_A , dtype=_A )
# get latents
snake_case = self.scheduler.add_noise(_A , _A , _A )
snake_case = init_latents
return latents
def UpperCAmelCase(self : Dict , _A : Optional[int] ) -> Union[str, Any]:
snake_case = self.coca_transform(_A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def UpperCAmelCase(self : int , _A : Union[str, Any] , _A : Optional[Any] ) -> Tuple:
snake_case = self.feature_extractor.preprocess(_A )
snake_case = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case = self.clip_model.get_image_features(_A )
snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_A )
snake_case = image_embeddings_clip.repeat_interleave(_A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase(self : Union[str, Any] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[Any] , _A : List[str] , _A : Any , _A : str , ) -> List[Any]:
snake_case = latents.detach().requires_grad_()
snake_case = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
snake_case = self.unet(_A , _A , encoder_hidden_states=_A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case = self.scheduler.alphas_cumprod[timestep]
snake_case = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case = torch.sqrt(_A )
snake_case = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _A ):
snake_case = self.scheduler.sigmas[index]
snake_case = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 1 / 0.1_82_15 * sample
snake_case = self.vae.decode(_A ).sample
snake_case = (image / 2 + 0.5).clamp(0 , 1 )
snake_case = transforms.Resize(self.feature_extractor_size )(_A )
snake_case = self.normalize(_A ).to(latents.dtype )
snake_case = self.clip_model.get_image_features(_A )
snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_A )
snake_case = spherical_dist_loss(_A , _A ).mean() * clip_guidance_scale
snake_case = -torch.autograd.grad(_A , _A )[0]
if isinstance(self.scheduler , _A ):
snake_case = latents.detach() + grads * (sigma**2)
snake_case = noise_pred_original
else:
snake_case = noise_pred_original - torch.sqrt(_A ) * grads
return noise_pred, latents
# NOTE(review): identifiers in this method are mangled — the duplicate `_A`
# parameter names are a SyntaxError as written, and every `snake_case = ...`
# assignment discards its value while later lines read the original variable
# names (batch_size, content_prompt, text_embeddings, latents, ...).
# Kept byte-identical pending a verified restoration from the original
# CLIP-guided image-mixing pipeline.
@torch.no_grad()
def __call__(self : str , _A : Union[torch.FloatTensor, PIL.Image.Image] , _A : Union[torch.FloatTensor, PIL.Image.Image] , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[int] = 5_1_2 , _A : Optional[int] = 5_1_2 , _A : float = 0.6 , _A : Optional[int] = 5_0 , _A : Optional[float] = 7.5 , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[float] = 1_0_0 , _A : Optional[torch.Generator] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : float = 0.8 , _A : float = 0.1 , _A : float = 0.1 , ) -> Any:
    """Blend a content and a style image via slerp of latents/prompts/CLIP embeddings,
    then run guided diffusion to produce the mixed image."""
    # --- input validation ---
    if isinstance(_A , _A ) and len(_A ) != batch_size:
        raise ValueError(f'You have passed {batch_size} batch_size, but only {len(_A )} generators.' )
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
    if isinstance(_A , torch.Generator ) and batch_size > 1:
        snake_case = [generator] + [None] * (batch_size - 1)
    # Track which CoCa captioning components are missing (used to error out
    # if a prompt must be auto-generated but CoCa is unavailable).
    snake_case = [
        ("model", self.coca_model is None),
        ("tokenizer", self.coca_tokenizer is None),
        ("transform", self.coca_transform is None),
    ]
    snake_case = [x[0] for x in coca_is_none if x[1]]
    snake_case = ", ".join(_A )
    # generate prompts with coca model if prompt is None
    if content_prompt is None:
        if len(_A ):
            raise ValueError(
                f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
        snake_case = self.get_image_description(_A )
    if style_prompt is None:
        if len(_A ):
            raise ValueError(
                f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
        snake_case = self.get_image_description(_A )
    # get prompt text embeddings for content and style
    snake_case = self.tokenizer(
        _A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=_A , return_tensors="pt" , )
    snake_case = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
    snake_case = self.tokenizer(
        _A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=_A , return_tensors="pt" , )
    snake_case = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
    # Spherical interpolation between content and style text embeddings.
    snake_case = slerp(_A , _A , _A )
    # duplicate text embeddings for each generation per prompt
    snake_case = text_embeddings.repeat_interleave(_A , dim=0 )
    # set timesteps
    snake_case = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
    snake_case = {}
    if accepts_offset:
        snake_case = 1
    self.scheduler.set_timesteps(_A , **_A )
    # Some schedulers like PNDM have timesteps as arrays
    # It's more optimized to move all timesteps to correct device beforehand
    self.scheduler.timesteps.to(self.device )
    snake_case , snake_case = self.get_timesteps(_A , _A , self.device )
    snake_case = timesteps[:1].repeat(_A )
    # Preprocess image
    snake_case = preprocess(_A , _A , _A )
    snake_case = self.prepare_latents(
        _A , _A , _A , text_embeddings.dtype , self.device , _A )
    snake_case = preprocess(_A , _A , _A )
    snake_case = self.prepare_latents(
        _A , _A , _A , text_embeddings.dtype , self.device , _A )
    # Blend the two image latents.
    snake_case = slerp(_A , _A , _A )
    if clip_guidance_scale > 0:
        # CLIP image embeddings of both inputs, blended for the guidance target.
        snake_case = self.get_clip_image_embeddings(_A , _A )
        snake_case = self.get_clip_image_embeddings(_A , _A )
        snake_case = slerp(
            _A , _A , _A )
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    snake_case = guidance_scale > 1.0
    # get unconditional embeddings for classifier free guidance
    if do_classifier_free_guidance:
        snake_case = content_text_input.input_ids.shape[-1]
        snake_case = self.tokenizer([""] , padding="max_length" , max_length=_A , return_tensors="pt" )
        snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
        # duplicate unconditional embeddings for each generation per prompt
        snake_case = uncond_embeddings.repeat_interleave(_A , dim=0 )
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        snake_case = torch.cat([uncond_embeddings, text_embeddings] )
    # get the initial random noise unless the user supplied it
    # Unlike in other pipelines, latents need to be generated in the target device
    # for 1-to-1 results reproducibility with the CompVis implementation.
    # However this currently doesn't work in `mps`.
    snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
    snake_case = text_embeddings.dtype
    if latents is None:
        if self.device.type == "mps":
            # randn does not work reproducibly on mps
            snake_case = torch.randn(_A , generator=_A , device="cpu" , dtype=_A ).to(
                self.device )
        else:
            snake_case = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
    else:
        if latents.shape != latents_shape:
            raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
        snake_case = latents.to(self.device )
    # scale the initial noise by the standard deviation required by the scheduler
    snake_case = latents * self.scheduler.init_noise_sigma
    # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
    # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
    # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
    # and should be between [0, 1]
    snake_case = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    snake_case = {}
    if accepts_eta:
        snake_case = eta
    # check if the scheduler accepts generator
    snake_case = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    if accepts_generator:
        snake_case = generator
    # --- main denoising loop ---
    with self.progress_bar(total=_A ):
        for i, t in enumerate(_A ):
            # expand the latents if we are doing classifier free guidance
            snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case = self.scheduler.scale_model_input(_A , _A )
            # predict the noise residual
            snake_case = self.unet(_A , _A , encoder_hidden_states=_A ).sample
            # perform classifier free guidance
            if do_classifier_free_guidance:
                snake_case , snake_case = noise_pred.chunk(2 )
                snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # perform clip guidance
            if clip_guidance_scale > 0:
                snake_case = (
                    text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                )
                snake_case , snake_case = self.cond_fn(
                    _A , _A , _A , _A , _A , _A , _A , )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
    # --- decode latents to an image ---
    # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
    snake_case = 1 / 0.1_82_15 * latents
    snake_case = self.vae.decode(_A ).sample
    snake_case = (image / 2 + 0.5).clamp(0 , 1 )
    snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
    if output_type == "pil":
        snake_case = self.numpy_to_pil(_A )
    if not return_dict:
        return (image, None)
    return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
| 294 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Mapping of submodule name -> public symbols, consumed by _LazyModule below.
# (The mangled original assigned this dict, the torch-only symbol list, and the
# _LazyModule instance to throwaway names, then referenced the never-defined
# `_import_structure` — a NameError at import time.)
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch absent: simply skip registering the torch-backed modeling module.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and pretrained-checkpoint archive map.
# NOTE(review): both assignments bind the same mangled name `__A`, so the
# logger is immediately shadowed by the (empty) archive map; conventionally
# these are `logger` and `MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for a Megatron-BERT model.

    Defaults correspond to the standard megatron-bert architecture. Fixes the
    mangled original, whose base class name was undefined (``PretrainedConfig``
    is the file's only imported base), whose ``__init__`` declared duplicate
    parameter names (a SyntaxError), and which assigned every hyper-parameter
    to a throwaway local instead of the instance.
    """

    # Identifier read by the transformers AutoConfig machinery.
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,                      # WordPiece vocabulary size
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",    # "absolute" or a relative-position variant
        use_cache=True,
        **kwargs,
    ):
        """Build the config; unknown keyword arguments are forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Store every architecture hyper-parameter on the instance so the
        # config actually round-trips through serialization.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class a ( BaseOutput ):
    """Output container for the text-to-video pipelines in this package.

    Fixes the mangled base class: ``UpperCAmelCase`` was an undefined name
    (NameError at import time); ``BaseOutput``, imported above, is the
    pipeline-output base these containers derive from.

    NOTE(review): the field declaration was mangled to ``_lowercase = 42``;
    the original field (the generated video frames) could not be recovered
    from this file, so it is preserved as-is.
    """

    _lowercase = 42
# Import the real pipeline classes only when both torch and transformers are
# installed; otherwise fall back to the dummy placeholder objects so
# `from ... import X` still resolves (and raises a helpful error at use time).
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 467 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module logger (mangled name; nothing visible in this file references it).
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Return the matrix of squared Euclidean distances between rows of `a` and `b`.

    Uses the expansion ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2 so the whole
    distance matrix comes from one matmul instead of an explicit pairwise loop.
    (Fixes the mangled original: duplicate parameter names were a SyntaxError,
    and the intermediates were assigned to a throwaway name while later lines
    read `aa`/`ab`/`ba`. The restored function name matches its call site in
    `color_quantize` below.)

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        Array of shape (n, m) where entry (i, j) is ||a[i] - b[j]||^2.
    """
    b = b.T  # (d, m) so that a @ b yields all pairwise inner products
    aa = np.sum(np.square(a), axis=1)  # ||a_i||^2, shape (n,)
    bb = np.sum(np.square(b), axis=0)  # ||b_j||^2, shape (m,)
    ab = np.matmul(a, b)               # <a_i, b_j>, shape (n, m)
    return aa[:, None] - 2 * ab + bb[None, :]
def color_quantize(x, clusters):
    """Map each pixel of `x` to the index of its nearest palette color.

    (Fixes the mangled original: duplicate parameter names were a SyntaxError
    and intermediates were discarded. The restored function name matches the
    call in the image processor's preprocessing method below.)

    Args:
        x: array whose last dimension is 3 (RGB); any leading shape.
        clusters: array of shape (k, 3) of palette colors.

    Returns:
        1-D array of cluster indices, one per (flattened) pixel.
    """
    x = x.reshape(-1, 3)  # flatten all pixels to rows
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)  # nearest palette entry per pixel
class a ( UpperCAmelCase ):
    # NOTE(review): identifiers in this class are mangled. The base class name
    # `UpperCAmelCase` is undefined here (presumably BaseImageProcessor from the
    # imports above — confirm), the three `_UpperCAmelCase` methods shadow one
    # another (they read as resize / a normalize-to-[-1,1] step / preprocess),
    # and `A_` parameter duplication makes the signatures SyntaxErrors as
    # written. Kept byte-identical pending a verified restoration.

    # Names of the inputs this processor produces (conventionally
    # `model_input_names`).
    _lowercase = ["pixel_values"]

    def __init__( self , A_ = None , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = True , **A_ , ):
        '''Store default preprocessing options: color-cluster palette, resize size
        (default 256x256), resample filter, and the normalize / color-quantize flags.'''
        super().__init__(**A_ )
        _UpperCAmelCase : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
        _UpperCAmelCase : Optional[int] = get_size_dict(A_ )
        # Palette is stored as a numpy array (or None if not provided).
        _UpperCAmelCase : Union[str, Any] = np.array(A_ ) if clusters is not None else None
        _UpperCAmelCase : int = do_resize
        _UpperCAmelCase : Union[str, Any] = size
        _UpperCAmelCase : Optional[Any] = resample
        _UpperCAmelCase : str = do_normalize
        _UpperCAmelCase : List[str] = do_color_quantize

    def _UpperCAmelCase ( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , ):
        '''Resize an image to the {"height", "width"} given in `size`.'''
        _UpperCAmelCase : int = get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            A_ , size=(size["height"], size["width"]) , resample=A_ , data_format=A_ , **A_ )

    def _UpperCAmelCase ( self , A_ , A_ = None , ):
        '''Scale pixel values to [-1, 1]: divide by 127.5 then subtract 1.'''
        _UpperCAmelCase : Dict = rescale(image=A_ , scale=1 / 1_27.5 , data_format=A_ )
        _UpperCAmelCase : List[Any] = image - 1
        return image

    def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ):
        '''Full pipeline: resize -> normalize -> optional palette color-quantize,
        returning a BatchFeature keyed "input_ids" (quantized) or "pixel_values".'''
        # Fall back to the instance defaults for any option not passed in.
        _UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
        _UpperCAmelCase : Any = size if size is not None else self.size
        _UpperCAmelCase : Dict = get_size_dict(A_ )
        _UpperCAmelCase : List[Any] = resample if resample is not None else self.resample
        _UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
        _UpperCAmelCase : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        _UpperCAmelCase : Any = clusters if clusters is not None else self.clusters
        _UpperCAmelCase : Optional[int] = np.array(A_ )
        _UpperCAmelCase : List[str] = make_list_of_images(A_ )
        if not valid_images(A_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        _UpperCAmelCase : List[str] = [to_numpy_array(A_ ) for image in images]
        if do_resize:
            _UpperCAmelCase : int = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
        if do_normalize:
            _UpperCAmelCase : List[str] = [self.normalize(image=A_ ) for image in images]
        if do_color_quantize:
            _UpperCAmelCase : Tuple = [to_channel_dimension_format(A_ , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            _UpperCAmelCase : List[str] = np.array(A_ )
            _UpperCAmelCase : List[Any] = color_quantize(A_ , A_ ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            _UpperCAmelCase : Any = images.shape[0]
            _UpperCAmelCase : List[Any] = images.reshape(A_ , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            _UpperCAmelCase : Union[str, Any] = list(A_ )
        else:
            _UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
        _UpperCAmelCase : List[Any] = {"input_ids": images}
        return BatchFeature(data=A_ , tensor_type=A_ )
| 467 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module-level logger; the rest of this script refers to it as `logger`
# (the original mangled binding `A_` left every `logger.info(...)` call a
# NameError at runtime).
logger = getLogger(__name__)
# NOTE(review): identifiers in this function are mangled — the duplicate
# `SCREAMING_SNAKE_CASE` parameters are a SyntaxError as written, and each
# `__UpperCAmelCase = ...` assignment discards its value while later lines
# read the original names (local_rank, save_dir, model, tokenizer, ds, ...).
# Kept byte-identical pending restoration; this reads as the per-node worker
# of a distributed seq2seq generation/eval run.
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = 1_0_2_4 , SCREAMING_SNAKE_CASE="val" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="summarization" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE , ) -> Dict:
    """Run generation for this rank's shard of the dataset and dump the
    predictions to `rank_<local_rank>_output.json`; returns the results plus
    the sampler's replica count."""
    __UpperCAmelCase = str(SCREAMING_SNAKE_CASE )
    assert local_rank is not None
    # Join the distributed process group for this worker.
    torch.distributed.init_process_group(backend='''nccl''' , rank=SCREAMING_SNAKE_CASE )
    __UpperCAmelCase = Path(SCREAMING_SNAKE_CASE )
    __UpperCAmelCase = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
    torch.cuda.set_device(SCREAMING_SNAKE_CASE )
    __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE ).cuda()
    if fpaa:
        __UpperCAmelCase = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # update config with task specific params
    __UpperCAmelCase = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
    if num_return_sequences > num_beams:
        __UpperCAmelCase = num_return_sequences
    __UpperCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
    logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
    if max_source_length is None:
        __UpperCAmelCase = tokenizer.model_max_length
    if prefix is None:
        __UpperCAmelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    __UpperCAmelCase = SeqaSeqDataset(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_target_length=1_0_2_4 , type_path=SCREAMING_SNAKE_CASE , n_obs=SCREAMING_SNAKE_CASE , prefix=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    __UpperCAmelCase = ds.make_sortish_sampler(SCREAMING_SNAKE_CASE , distributed=SCREAMING_SNAKE_CASE , add_extra_examples=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE )
    __UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn )
    __UpperCAmelCase = []
    for batch in tqdm(SCREAMING_SNAKE_CASE ):
        __UpperCAmelCase = model.generate(
            input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=SCREAMING_SNAKE_CASE , num_beams=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
        __UpperCAmelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE )
        __UpperCAmelCase = batch['''ids''']
        if num_return_sequences > 1:
            __UpperCAmelCase = chunks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(SCREAMING_SNAKE_CASE ):
            results.append({'''pred''': pred, '''id''': ids[i].item()} )
    save_json(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    return results, sampler.num_replicas
# NOTE(review): identifiers are mangled — every `SCREAMING_SNAKE_CASE` below is
# an undefined name at runtime (the originals were concrete values such as
# `type=str`, `default=None`, `required=False`, ...), and `__UpperCAmelCase`
# assignments discard their values while later lines read the original names
# (parser, args, json_save_dir, preds, metrics, ...). Kept byte-identical;
# restoring the argparse defaults needs the original script.
def __a ( ) -> Tuple:
    """CLI entry point: parse args, fan the evaluation out via eval_data_dir,
    then (on rank 0) gather per-node outputs, score them and save metrics."""
    __UpperCAmelCase = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
    parser.add_argument('''--data_dir''' , type=SCREAMING_SNAKE_CASE , help='''like cnn_dm/test.source''' )
    parser.add_argument(
        '''--model_name''' , type=SCREAMING_SNAKE_CASE , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
    parser.add_argument('''--save_dir''' , type=SCREAMING_SNAKE_CASE , help='''where to save''' , default='''tmp_gen''' )
    parser.add_argument('''--max_source_length''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE )
    parser.add_argument(
        '''--type_path''' , type=SCREAMING_SNAKE_CASE , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
    parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE , default=8 , required=SCREAMING_SNAKE_CASE , help='''batch size''' )
    parser.add_argument(
        '''--local_rank''' , type=SCREAMING_SNAKE_CASE , default=-1 , required=SCREAMING_SNAKE_CASE , help='''should be passed by distributed.launch''' )
    parser.add_argument(
        '''--n_obs''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''How many observations. Defaults to all.''' )
    parser.add_argument(
        '''--num_return_sequences''' , type=SCREAMING_SNAKE_CASE , default=1 , required=SCREAMING_SNAKE_CASE , help='''How many sequences to return''' )
    parser.add_argument(
        '''--sync_timeout''' , type=SCREAMING_SNAKE_CASE , default=6_0_0 , required=SCREAMING_SNAKE_CASE , help='''How long should master process wait for other processes to finish.''' , )
    parser.add_argument('''--src_lang''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
    parser.add_argument('''--tgt_lang''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
    parser.add_argument(
        '''--prefix''' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''will be added to the begininng of src examples''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--debug''' , action='''store_true''' )
    __UpperCAmelCase = time.time()
    # Unknown CLI flags are forwarded to model.generate (see epilog above).
    __UpperCAmelCase , __UpperCAmelCase = parser.parse_known_args()
    __UpperCAmelCase = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE )
    if generate_kwargs and args.local_rank <= 0:
        print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
    # Each worker writes its partial output to <save_dir>_tmp.
    __UpperCAmelCase = Path(args.save_dir + '''_tmp''' )
    Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) # this handles locking.
    __UpperCAmelCase = list(json_save_dir.glob('''rank_*.json''' ) )
    if intermediate_files:
        raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    __UpperCAmelCase = {}
    if args.src_lang is not None:
        __UpperCAmelCase = args.src_lang
    if args.tgt_lang is not None:
        __UpperCAmelCase = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
    __UpperCAmelCase , __UpperCAmelCase = eval_data_dir(
        args.data_dir , SCREAMING_SNAKE_CASE , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
    # Only rank 0 aggregates, scores and writes final outputs.
    if args.local_rank <= 0:
        __UpperCAmelCase = Path(args.save_dir )
        save_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE )
        __UpperCAmelCase = gather_results_from_each_node(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , args.sync_timeout )
        __UpperCAmelCase = combine_partial_results(SCREAMING_SNAKE_CASE )
        if args.num_return_sequences > 1:
            # Pseudolabel mode: save raw generations and skip metric computation.
            __UpperCAmelCase = save_dir.joinpath('''pseudolabel_results.json''' )
            print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
            save_json(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            return
        __UpperCAmelCase = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
        with open(SCREAMING_SNAKE_CASE ) as f:
            __UpperCAmelCase = [x.rstrip() for x in f.readlines()][: len(SCREAMING_SNAKE_CASE )]
        # Calculate metrics, save metrics, and save _generations.txt
        __UpperCAmelCase = '''translation''' in args.task
        __UpperCAmelCase = calculate_bleu if calc_bleu else calculate_rouge
        __UpperCAmelCase = '''bleu''' if calc_bleu else '''rouge'''
        __UpperCAmelCase = score_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
        __UpperCAmelCase = time.time() - start_time
        __UpperCAmelCase = round(runtime / metrics['''n_obs'''] , 4 )
        __UpperCAmelCase = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        __UpperCAmelCase = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
        save_json(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=SCREAMING_SNAKE_CASE )
        print(SCREAMING_SNAKE_CASE )
        write_txt_file(SCREAMING_SNAKE_CASE , save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
        if args.debug:
            write_txt_file(SCREAMING_SNAKE_CASE , save_dir.joinpath(f'''{args.type_path}.target''' ) )
        else:
            shutil.rmtree(SCREAMING_SNAKE_CASE )
def combine_partial_results(partial_results):
    """Merge per-node prediction lists into one id-ordered list of predictions.

    (Fixes the mangled original: the function name is restored to match its
    call site in ``run_generate``, the loop extended an undefined name instead
    of each partial result, and the sort lambda's parameter was mangled while
    its body read ``x``.)

    Args:
        partial_results: iterable of lists of ``{"pred": ..., "id": ...}`` records,
            one list per worker node.

    Returns:
        Predictions sorted by their integer ``id``.
    """
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    # Restore the original dataset order before stripping the ids.
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    """Poll ``save_dir`` until every node has written its ``rank_*.json``, then load them.

    (Fixes the mangled original: duplicate parameter names were a SyntaxError,
    and the ``lmap`` call's arguments were mangled — the intended call,
    ``lmap(load_json, json_files)``, parses every per-rank file.)

    Args:
        num_replicas: number of worker processes expected to write a file.
        save_dir: pathlib.Path the workers write ``rank_<i>.json`` into.
        timeout: seconds to wait before giving up.

    Returns:
        One parsed-JSON payload per node file.

    Raises:
        TimeoutError: if the files do not all appear (and parse) within ``timeout``.
    """
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            # A worker is mid-write; retry on the next pass.
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # Script entry point: parse CLI flags and launch distributed evaluation.
    run_generate()
| 303 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A_ ( _a ):
'''simple docstring'''
a__ = CustomTokenizer
pass
| 303 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
# NOTE(review): identifiers are mangled — the parameter is declared `__A` but
# the body reads `model_name`, every `lowercase : ... =` assignment discards
# its value (the bare `False` after the first check looks like
# `config.use_mean_pooling = False` — confirm against the original), and all
# four functions in this script share the def name `lowercase_`, so they
# shadow each other. Kept byte-identical pending restoration.
def lowercase_ ( __A : Any ) -> Tuple:
    """Build a VideoMAEConfig for `model_name`; fine-tuned checkpoints also get
    num_labels plus id2label/label2id maps downloaded from the Hub."""
    lowercase : int =VideoMAEConfig()
    set_architecture_configs(lowerCAmelCase__ , lowerCAmelCase__ )
    if "finetuned" not in model_name:
        lowercase : List[Any] =False
    if "finetuned" in model_name:
        lowercase : Union[str, Any] ='''huggingface/label-files'''
        if "kinetics" in model_name:
            lowercase : str =4_0_0
            lowercase : Any ='''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            lowercase : Union[str, Any] =1_7_4
            lowercase : Optional[Any] ='''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        # Download the label file from the Hub and build id<->label maps.
        lowercase : Optional[Any] =json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
        lowercase : Dict ={int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
        lowercase : int =idalabel
        lowercase : Optional[Any] ={v: k for k, v in idalabel.items()}
    return config
# NOTE(review): as written the duplicate `__A` parameters are a SyntaxError and
# every value is assigned to a throwaway local instead of a config attribute.
# The eight values per branch follow the pattern (hidden_size,
# intermediate_size, num_hidden_layers, num_attention_heads,
# decoder_num_hidden_layers, decoder_num_attention_heads, decoder_hidden_size,
# decoder_intermediate_size) — confirm against the original before restoring.
def lowercase_ ( __A : str , __A : int ) -> Optional[Any]:
    """Set encoder/decoder dimensions on the config based on the size tag
    ("small"/"base"/"large"/"huge") embedded in the model name."""
    if "small" in model_name:
        lowercase : Dict =3_8_4
        lowercase : Optional[Any] =1_5_3_6
        lowercase : Union[str, Any] =1_2
        lowercase : List[str] =1_6
        lowercase : Union[str, Any] =1_2
        lowercase : Tuple =3
        lowercase : str =1_9_2
        lowercase : List[str] =7_6_8
    elif "large" in model_name:
        lowercase : Any =1_0_2_4
        lowercase : Tuple =4_0_9_6
        lowercase : List[Any] =2_4
        lowercase : Tuple =1_6
        lowercase : int =1_2
        lowercase : List[Any] =8
        lowercase : List[Any] =5_1_2
        lowercase : Optional[Any] =2_0_4_8
    elif "huge" in model_name:
        lowercase : Any =1_2_8_0
        lowercase : Union[str, Any] =5_1_2_0
        lowercase : Dict =3_2
        lowercase : Optional[int] =1_6
        lowercase : List[str] =1_2
        lowercase : Optional[int] =8
        lowercase : Optional[int] =6_4_0
        lowercase : Union[str, Any] =2_5_6_0
    elif "base" not in model_name:
        # "base" is the default architecture, so only unknown tags are an error.
        raise ValueError('''Model name should include either \"small\", \"base\", \"large\", or \"huge\"''' )
def rename_key(name):
    """Translate one original VideoMAE checkpoint key into the transformers naming scheme.

    Applies a sequence of in-place substring rewrites; order matters (e.g.
    "attn.proj" must be handled before the generic "attn" rules, and
    "decoder.blocks" before "blocks").

    (Fixes the mangled original: the parameter was declared ``__A`` while the
    body read ``name``, and every ``.replace()`` result was assigned to a
    throwaway local, so the function returned its input unchanged.)
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        # Only bias keys still contain "attn" at this point.
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
# NOTE(review): identifiers are mangled — the parameters are declared `__A`
# while the body reads `orig_state_dict` and `config`, and the target dict
# keys of the q/k/v slices were lost (each slice is assigned to a throwaway
# local). The `val[:dim]`, `val[dim:2*dim]`, `val[-dim:]` slices are the
# query/key/value rows of the fused qkv matrix; restoring the destination
# keys needs the original conversion script.
def lowercase_ ( __A : List[Any] , __A : Tuple ) -> int:
    """Rewrite an original VideoMAE state dict into the transformers layout,
    splitting fused qkv weights per layer."""
    for key in orig_state_dict.copy().keys():
        lowercase : List[Any] =orig_state_dict.pop(lowerCAmelCase__ )
        if key.startswith('''encoder.''' ):
            lowercase : Optional[Any] =key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            lowercase : int =key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                # Decoder layers use the (smaller) decoder hidden size.
                lowercase : Optional[int] =config.decoder_hidden_size
                lowercase : List[Any] =int(key_split[2] )
                lowercase : Union[str, Any] ='''decoder.decoder_layers.'''
                if "weight" in key:
                    lowercase : Optional[int] =val[:dim, :]
                    lowercase : Optional[Any] =val[dim : dim * 2, :]
                    lowercase : Optional[int] =val[-dim:, :]
            else:
                # Encoder layers use the main hidden size.
                lowercase : Tuple =config.hidden_size
                lowercase : Any =int(key_split[1] )
                lowercase : Optional[int] ='''videomae.encoder.layer.'''
                if "weight" in key:
                    lowercase : Optional[Any] =val[:dim, :]
                    lowercase : Tuple =val[dim : dim * 2, :]
                    lowercase : str =val[-dim:, :]
        else:
            lowercase : int =val
    return orig_state_dict
def lowercase_() -> list:
    """Download the sample video stored as a ``.npy`` array on the hub and
    return it as a list of per-frame arrays.

    NOTE(review): the obfuscated original stored the downloaded path in a
    local that was never read and then called ``np.load`` on the undefined
    name ``lowerCAmelCase__``; the path is now actually passed to ``np.load``.
    """
    file_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file_path)
    return list(video)
def lowercase_ ( __A : int , __A : Optional[Any] , __A : Any , __A : Optional[Any] ) -> Union[str, Any]:
    """Convert an original VideoMAE checkpoint (hosted on Google Drive) to the
    HF format, verify its logits/loss against hard-coded expected values, and
    optionally save it locally and/or push it to the hub.

    NOTE(review): obfuscation artifacts throughout — the four parameters are
    all named ``__A`` (a SyntaxError), while the body references
    ``model_name``, ``lowerCAmelCase__``, ``expected_shape``,
    ``expected_slice``, ``model_names`` etc. that are never bound, and every
    ``lowercase`` assignment shadows the previous one. Code kept byte-identical;
    the comments describe the evident intent. Confirm against the upstream
    conversion script before running.
    """
    # Build an HF model matching the requested variant (classification head
    # for fine-tuned checkpoints, pre-training head otherwise).
    lowercase : Tuple =get_videomae_config(lowerCAmelCase__ )
    if "finetuned" in model_name:
        lowercase : str =VideoMAEForVideoClassification(lowerCAmelCase__ )
    else:
        lowercase : List[str] =VideoMAEForPreTraining(lowerCAmelCase__ )
    # download original checkpoint, hosted on Google Drive
    lowercase : Any ='''pytorch_model.bin'''
    gdown.cached_download(lowerCAmelCase__ , lowerCAmelCase__ , quiet=lowerCAmelCase__ )
    lowercase : str =torch.load(lowerCAmelCase__ , map_location='''cpu''' )
    # Original checkpoints store the weights under either "model" or "module".
    if "model" in files:
        lowercase : List[str] =files['''model''']
    else:
        lowercase : Optional[int] =files['''module''']
    lowercase : Tuple =convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
    model.load_state_dict(lowerCAmelCase__ )
    model.eval()
    # verify model on basic input
    lowercase : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowercase : Any =prepare_video()
    lowercase : int =image_processor(lowerCAmelCase__ , return_tensors='''pt''' )
    # Pre-training checkpoints additionally need the boolean patch mask.
    if "finetuned" not in model_name:
        lowercase : Dict =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowercase : Optional[Any] =torch.load(lowerCAmelCase__ )
    lowercase : Any =model(**lowerCAmelCase__ )
    lowercase : Union[str, Any] =outputs.logits
    # All checkpoint variants this script knows how to verify.
    lowercase : Tuple =[
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        lowercase : str =torch.Size([1, 4_0_0] )
        lowercase : List[Any] =torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowercase : str =torch.Size([1, 1_7_4] )
        lowercase : Union[str, Any] =torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        lowercase : Tuple =torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowercase : Optional[Any] =torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        lowercase : Tuple =torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowercase : Any =torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowercase : List[str] =torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        lowercase : Dict =torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowercase : Dict =torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowercase : int =torch.Size([1, 4_0_0] )
        lowercase : str =torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowercase : List[Any] =torch.Size([1, 4_0_0] )
        lowercase : Tuple =torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowercase : Union[str, Any] =torch.Size([1, 4_0_0] )
        lowercase : Optional[Any] =torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowercase : List[Any] =torch.Size([1, 4_0_0] )
        lowercase : List[str] =torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        lowercase : Optional[int] =torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowercase : str =torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowercase : Tuple =torch.Size([1, 1_7_4] )
        lowercase : str =torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        lowercase : List[Any] =torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowercase : Optional[Any] =torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowercase : Any =torch.Size([1, 1_7_4] )
        lowercase : List[Any] =torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(F'Model name not supported. Should be one of {model_names}' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1E-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowercase : Optional[int] =outputs.loss
        assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(lowerCAmelCase__ )
        model.save_pretrained(lowerCAmelCase__ )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(lowerCAmelCase__ , organization='''nielsr''' )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser (and later the parsed args)
    # to the obfuscated global SCREAMING_SNAKE_CASE while the statements below
    # referenced the never-defined names `parser`/`args`; bind the
    # conventional names so the script can actually run.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE = args  # preserve the module-level binding the original made
    # NOTE(review): `convert_videomae_checkpoint` is not defined in this file
    # (the conversion function above was obfuscated to `lowercase_`) — confirm
    # the intended target before running.
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_:
    """Multiply two polynomials with the radix-2 fast Fourier transform.

    ``poly_a`` and ``poly_b`` are coefficient lists (index = power of x).
    The product's coefficients (rounded to 8 decimals, stored as complex
    numbers with negligible imaginary parts) are available as
    ``self.product`` immediately after construction.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Copy inputs and strip trailing zero coefficients. Keep at least one
        # term so an all-zero polynomial no longer raises IndexError.
        self.polyA = list(poly_a or [0])[:]
        while len(self.polyA) > 1 and self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        self.polyB = list(poly_b or [0])[:]
        while len(self.polyB) > 1 and self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Pad both polynomials with zeros up to the next power of two large
        # enough to hold the product (deg A + deg B + 1 coefficients).
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # Primitive c_max_length-th root of unity, e^(2*pi*i/n). numpy stands
        # in for the original mpmath.root(1, n, 1) — identical up to float
        # rounding, which the 8-decimal rounding below absorbs.
        self.root = complex(np.exp(2j * np.pi / self.c_max_length))
        # The product, computed eagerly.
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative butterfly DFT of polyA (``which == 'A'``) or polyB."""
        dft = [[coefficient] for coefficient in (self.polyA if which == "A" else self.polyB)]
        # Corner case: a single coefficient is its own DFT.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step.
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step.
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update.
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, then invert the transform."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner case: nothing to invert for a single coefficient.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT: the /2 factors accumulate to the required 1/n.
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions.
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions.
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update.
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Round float noise away, then drop trailing zero coefficients (keep
        # at least one so a zero product stays representable).
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        while len(inverce_c) > 1 and inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Render A, B and A*B as sums of ``coef*x^power`` terms."""
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests: when this module is executed directly, run any doctest
# examples embedded in the module/class docstrings.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A_(__UpperCAmelCase ):
    """Output container for a 1D UNet forward pass.

    NOTE(review): the base-class name ``__UpperCAmelCase`` is an obfuscation
    artifact and is undefined here — upstream this dataclass derives from
    ``BaseOutput``; confirm before use.
    """

    # NOTE(review): obfuscated field — upstream this is
    # ``sample: torch.FloatTensor`` (the denoised output); ``42`` is a
    # placeholder produced by the obfuscation.
    a_ : Dict = 42
class A_(__UpperCAmelCase , __UpperCAmelCase ):
    """A 1D UNet model (down blocks -> mid block -> up blocks -> out block)
    with a Fourier or positional timestep embedding.

    NOTE(review): obfuscation artifacts — the base classes ``__UpperCAmelCase``
    are undefined (upstream: ``ModelMixin, ConfigMixin``), ``__init__`` and
    the forward method declare every parameter as ``A`` (a SyntaxError), and
    every ``_lowerCamelCase`` assignment discards a value that upstream is
    stored on ``self`` or in a named local. Code kept byte-identical; the
    comments describe the evident intent.
    """
    @register_to_config
    def __init__( self , A = 6_5536 , A = None , A = 2 , A = 2 , A = 0 , A = "fourier" , A = True , A = False , A = 0.0 , A = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A = "UNetMidBlock1D" , A = None , A = (32, 32, 64) , A = None , A = 8 , A = 1 , A = False , ):
        super().__init__()
        _lowerCamelCase : Optional[int] = sample_size
        # time
        # Choose between a Gaussian-Fourier and a sinusoidal-positional
        # timestep projection; the embedding width differs between the two.
        if time_embedding_type == "fourier":
            _lowerCamelCase : Dict = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=UpperCamelCase__ , log=UpperCamelCase__ , flip_sin_to_cos=UpperCamelCase__ )
            _lowerCamelCase : Dict = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            _lowerCamelCase : Optional[Any] = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=UpperCamelCase__ , downscale_freq_shift=UpperCamelCase__ )
            _lowerCamelCase : Tuple = block_out_channels[0]
        if use_timestep_embedding:
            _lowerCamelCase : Union[str, Any] = block_out_channels[0] * 4
            _lowerCamelCase : Optional[int] = TimestepEmbedding(
                in_channels=UpperCamelCase__ , time_embed_dim=UpperCamelCase__ , act_fn=UpperCamelCase__ , out_dim=block_out_channels[0] , )
        _lowerCamelCase : Optional[int] = nn.ModuleList([] )
        _lowerCamelCase : List[Any] = None
        _lowerCamelCase : Optional[int] = nn.ModuleList([] )
        _lowerCamelCase : List[Any] = None
        # down
        _lowerCamelCase : str = in_channels
        for i, down_block_type in enumerate(UpperCamelCase__ ):
            _lowerCamelCase : List[Any] = output_channel
            _lowerCamelCase : Tuple = block_out_channels[i]
            # The first block additionally receives the extra input channels.
            if i == 0:
                input_channel += extra_in_channels
            _lowerCamelCase : Dict = i == len(UpperCamelCase__ ) - 1
            _lowerCamelCase : Optional[Any] = get_down_block(
                UpperCamelCase__ , num_layers=UpperCamelCase__ , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(UpperCamelCase__ )
        # mid
        _lowerCamelCase : List[str] = get_mid_block(
            UpperCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCamelCase__ , add_downsample=UpperCamelCase__ , )
        # up
        _lowerCamelCase : Optional[Any] = list(reversed(UpperCamelCase__ ) )
        _lowerCamelCase : Tuple = reversed_block_out_channels[0]
        if out_block_type is None:
            _lowerCamelCase : Union[str, Any] = out_channels
        else:
            _lowerCamelCase : Optional[int] = block_out_channels[0]
        for i, up_block_type in enumerate(UpperCamelCase__ ):
            _lowerCamelCase : Union[str, Any] = output_channel
            _lowerCamelCase : Optional[int] = (
                reversed_block_out_channels[i + 1] if i < len(UpperCamelCase__ ) - 1 else final_upsample_channels
            )
            _lowerCamelCase : Any = i == len(UpperCamelCase__ ) - 1
            _lowerCamelCase : Union[str, Any] = get_up_block(
                UpperCamelCase__ , num_layers=UpperCamelCase__ , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(UpperCamelCase__ )
            _lowerCamelCase : List[str] = output_channel
        # out
        _lowerCamelCase : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        _lowerCamelCase : Any = get_out_block(
            out_block_type=UpperCamelCase__ , num_groups_out=UpperCamelCase__ , embed_dim=block_out_channels[0] , out_channels=UpperCamelCase__ , act_fn=UpperCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
    def _lowerCAmelCase ( self , A , A , A = True , ):
        """Forward pass: embed the timestep, run down/mid/up blocks with skip
        connections, and post-process. Returns a one-tuple when
        ``return_dict`` is falsy, else a ``UNetaDOutput``."""
        # 1. time: normalize the timestep to a 1-D tensor on the right device.
        _lowerCamelCase : Dict = timestep
        if not torch.is_tensor(UpperCamelCase__ ):
            _lowerCamelCase : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0:
            _lowerCamelCase : List[str] = timesteps[None].to(sample.device )
        _lowerCamelCase : Union[str, Any] = self.time_proj(UpperCamelCase__ )
        if self.config.use_timestep_embedding:
            _lowerCamelCase : List[str] = self.time_mlp(UpperCamelCase__ )
        else:
            # Broadcast the raw projection across the sample's length axis.
            _lowerCamelCase : Dict = timestep_embed[..., None]
            _lowerCamelCase : Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            _lowerCamelCase : Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        _lowerCamelCase : Dict = ()
        for downsample_block in self.down_blocks:
            _lowerCamelCase , _lowerCamelCase : List[str] = downsample_block(hidden_states=UpperCamelCase__ , temb=UpperCamelCase__ )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            _lowerCamelCase : List[str] = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
        # 4. up: consume the residual stack from the top.
        for i, upsample_block in enumerate(self.up_blocks ):
            _lowerCamelCase : List[Any] = down_block_res_samples[-1:]
            _lowerCamelCase : str = down_block_res_samples[:-1]
            _lowerCamelCase : List[str] = upsample_block(UpperCamelCase__ , res_hidden_states_tuple=UpperCamelCase__ , temb=UpperCamelCase__ )
        # 5. post-process
        if self.out_block:
            _lowerCamelCase : List[str] = self.out_block(UpperCamelCase__ , UpperCamelCase__ )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=UpperCamelCase__ )
| 437 |
# Function to print upper half of diamond (pyramid)
def __UpperCamelCase ( _A ):
for i in range(0 , _A ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def __UpperCamelCase ( _A ):
for i in range(_A , 0 , -1 ):
for _ in range(_A , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def __UpperCamelCase ( _A ):
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(_A ) # upper half
reverse_floyd(_A ) # lower half
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
_A = 1
while K:
_A = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
_A = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 431 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A__ :
    """Model tester for CTRL: builds tiny configs/random inputs and runs
    shape checks on CTRLModel / CTRLLMHeadModel / CTRLForSequenceClassification.

    NOTE(review): obfuscation artifacts — ``__init__`` declares every
    parameter as ``lowerCamelCase`` (a SyntaxError) and every
    ``__magic_name__`` assignment discards a value that upstream is stored on
    ``self`` (the other methods read ``self.batch_size`` etc.). Code kept
    byte-identical; confirm against the upstream test before running.
    """
    def __init__( self , lowerCamelCase , lowerCamelCase=14 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.0_2 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> Optional[Any]:
        """Store the (tiny) model/test hyper-parameters on the tester."""
        __magic_name__ : Dict = parent
        __magic_name__ : Tuple = batch_size
        __magic_name__ : str = seq_length
        __magic_name__ : Tuple = is_training
        __magic_name__ : Any = use_token_type_ids
        __magic_name__ : Any = use_input_mask
        __magic_name__ : List[str] = use_labels
        __magic_name__ : Tuple = use_mc_token_ids
        __magic_name__ : int = vocab_size
        __magic_name__ : List[Any] = hidden_size
        __magic_name__ : Optional[int] = num_hidden_layers
        __magic_name__ : Dict = num_attention_heads
        __magic_name__ : List[str] = intermediate_size
        __magic_name__ : List[str] = hidden_act
        __magic_name__ : Dict = hidden_dropout_prob
        __magic_name__ : Union[str, Any] = attention_probs_dropout_prob
        __magic_name__ : Any = max_position_embeddings
        __magic_name__ : Any = type_vocab_size
        __magic_name__ : int = type_sequence_label_size
        __magic_name__ : Tuple = initializer_range
        __magic_name__ : Optional[int] = num_labels
        __magic_name__ : Dict = num_choices
        __magic_name__ : List[str] = scope
        __magic_name__ : List[Any] = self.vocab_size - 1
    def lowercase ( self ) -> List[str]:
        """Build a config plus random ids/masks/labels for the common tests."""
        __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : Optional[int] = None
        if self.use_input_mask:
            __magic_name__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ : Optional[int] = None
        if self.use_token_type_ids:
            __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ : int = None
        if self.use_mc_token_ids:
            __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        __magic_name__ : Any = None
        __magic_name__ : Union[str, Any] = None
        __magic_name__ : Any = None
        if self.use_labels:
            __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
        __magic_name__ : Optional[int] = self.get_config()
        __magic_name__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def lowercase ( self ) -> List[str]:
        """Return a tiny CTRLConfig matching the tester's hyper-parameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase ) -> Any:
        """Run CTRLModel and check hidden-state/past-key-values shapes."""
        __magic_name__ : Dict = CTRLModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        model(lowerCamelCase , token_type_ids=lowerCamelCase , head_mask=lowerCamelCase )
        model(lowerCamelCase , token_type_ids=lowerCamelCase )
        __magic_name__ : List[Any] = model(lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
        """Run CTRLLMHeadModel with labels and check loss/logits shapes."""
        __magic_name__ : Optional[Any] = CTRLLMHeadModel(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __magic_name__ : Optional[Any] = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowercase ( self ) -> str:
        """Unpack prepare_config_and_inputs into the common inputs dict.

        NOTE(review): the parenthesized unpack below assigns every element to
        the same obfuscated name and carries an invalid annotation on a tuple
        target; upstream it unpacks into named variables.
        """
        __magic_name__ : Dict = self.prepare_config_and_inputs()
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) : Dict = config_and_inputs
        __magic_name__ : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase ) -> Tuple:
        """Run CTRLForSequenceClassification and check the logits shape."""
        __magic_name__ : Optional[Any] = self.num_labels
        __magic_name__ : Any = CTRLForSequenceClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __magic_name__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __magic_name__ : Union[str, Any] = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common model/pipeline test suite for CTRL.

    NOTE(review): the three ``__SCREAMING_SNAKE_CASE`` bases are obfuscation
    artifacts and are undefined here — upstream these are the
    ModelTesterMixin / GenerationTesterMixin / PipelineTesterMixin imported
    above; confirm before running.
    """
    lowerCamelCase__ : int =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    lowerCamelCase__ : Optional[int] =(CTRLLMHeadModel,) if is_torch_available() else ()
    lowerCamelCase__ : str =(
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase__ : Optional[Any] =True
    lowerCamelCase__ : List[str] =False
    lowerCamelCase__ : int =False
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
        """Skip pipeline tests that cannot run for this architecture."""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def lowercase ( self ) -> Dict:
        """Set up the model tester and the config tester."""
        __magic_name__ : Dict = CTRLModelTester(self )
        __magic_name__ : int = ConfigTester(self , config_class=lowerCamelCase , n_embd=37 )
    def lowercase ( self ) -> str:
        """Release as much GPU memory as possible after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase ( self ) -> List[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def lowercase ( self ) -> int:
        """Exercise CTRLModel via the model tester."""
        __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*lowerCamelCase )
    def lowercase ( self ) -> int:
        """Exercise CTRLLMHeadModel via the model tester."""
        __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*lowerCamelCase )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase ( self ) -> Any:
        """Intentionally skipped (see skip reason above)."""
        pass
    @slow
    def lowercase ( self ) -> List[str]:
        """Check that the pretrained CTRL checkpoint loads."""
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ : List[Any] = CTRLModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
    @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def lowercase ( self ) -> Union[str, Any]:
        """Intentionally skipped (see skip reason above)."""
        pass
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained CTRL
    checkpoint against a fixed expected token sequence."""
    def lowercase ( self ) -> Optional[Any]:
        """Release as much GPU memory as possible after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def lowercase ( self ) -> List[str]:
        """Greedy-generate from the prompt "Legal the president is" and
        compare against the hard-coded expected continuation ids."""
        __magic_name__ : Any = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(lowerCamelCase )
        __magic_name__ : List[str] = torch.tensor(
            [[11859, 0, 1611, 8]] , dtype=torch.long , device=lowerCamelCase ) # Legal the president is
        __magic_name__ : str = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        __magic_name__ : Tuple = model.generate(lowerCamelCase , do_sample=lowerCamelCase )
        self.assertListEqual(output_ids[0].tolist() , lowerCamelCase )
| 336 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa) -> None:
    """Convert a TensorFlow BigBird checkpoint to a PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the converted PyTorch model.
        is_trivia_qa: whether the checkpoint carries a trivia_qa (QA) head.

    NOTE(review): the obfuscated original declared all four parameters as
    ``UpperCAmelCase`` (a SyntaxError); the names here are restored from the
    argparse help text in the ``__main__`` block below.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F'''Building PyTorch model from configuration: {config}''' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser (and later the parsed
    # args) to the obfuscated global `lowercase_` while the statements below
    # referenced the never-defined names `parser`/`args`; bind the
    # conventional names so the script can actually run.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--big_bird_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
    )
    args = parser.parse_args()
    lowercase_ = args  # preserve the module-level binding the original made
    # NOTE(review): the original called the undefined name
    # `convert_tf_checkpoint_to_pytorch`; the conversion function defined
    # above in this file is `lowerCAmelCase`, so call that instead.
    lowerCAmelCase(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 336 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( __UpperCamelCase ):
    '''Processor wrapping a BlipImageProcessor and a tokenizer: images go to
    the image processor, text to the tokenizer, and the two encodings are
    merged into one batch.

    NOTE(review): obfuscation artifacts — the base ``__UpperCamelCase`` is
    undefined here (upstream: ``ProcessorMixin``); ``__snake_case`` is
    assigned three times below (upstream: ``attributes``,
    ``image_processor_class``, ``tokenizer_class``); ``__init__`` and
    ``__call__`` declare duplicate parameters named ``a`` (a SyntaxError),
    and several ``__lowerCamelCase`` assignments discard values the later
    statements read (e.g. ``text_encoding``). Code kept byte-identical.
    '''
    __snake_case = ["""image_processor""", """tokenizer"""]
    __snake_case = """BlipImageProcessor"""
    __snake_case = """AutoTokenizer"""
    def __init__( self: Optional[Any] , a: Union[str, Any] , a: List[Any] ):
        """Store the processors; upstream also disables the tokenizer's
        token_type_ids and sets the image processor as the current one."""
        __lowerCamelCase : int = False
        super().__init__(snake_case_ , snake_case_ )
        __lowerCamelCase : Union[str, Any] = self.image_processor
    def __call__( self: Optional[int] , a: List[Any] = None , a: Any = None , a: int = True , a: List[str] = False , a: int = None , a: Union[str, Any] = None , a: Union[str, Any] = 0 , a: Any = None , a: Optional[int] = None , a: Tuple = False , a: Optional[int] = False , a: Union[str, Any] = False , a: Optional[Any] = False , a: str = False , a: List[Any] = True , a: Dict = None , **a: List[str] , ):
        """Tokenize text and/or preprocess images; with both present, the
        text encoding is merged into the image-processor output."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            __lowerCamelCase : Tuple = self.tokenizer
            __lowerCamelCase : Optional[int] = self.tokenizer(
                text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
            return text_encoding
        # add pixel_values
        __lowerCamelCase : Optional[int] = self.image_processor(snake_case_ , return_tensors=snake_case_ )
        if text is not None:
            __lowerCamelCase : List[Any] = self.tokenizer(
                text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
        else:
            __lowerCamelCase : List[str] = None
        if text_encoding is not None:
            encoding_image_processor.update(snake_case_ )
        return encoding_image_processor
    def _snake_case ( self: Optional[int] , *a: Optional[int] , **a: Optional[int] ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
    def _snake_case ( self: Optional[Any] , *a: List[str] , **a: str ):
        """Forward to the tokenizer's decode.

        NOTE(review): this redefines ``_snake_case`` (so does the property
        below) — upstream these are distinct methods named ``batch_decode``,
        ``decode`` and ``model_input_names``."""
        return self.tokenizer.decode(*snake_case_ , **snake_case_ )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _snake_case ( self: List[Any] ):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        __lowerCamelCase : Optional[int] = self.tokenizer.model_input_names
        __lowerCamelCase : List[str] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
# Force deterministic torch ops for the whole test module so pipeline outputs
# are reproducible across runs (helper from diffusers.utils.testing_utils).
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast tests for the single-ControlNet image-to-image pipeline using tiny random models."""

    # Attributes consumed by the shared tester mixins to drive the common tests.
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Return a dict of tiny, deterministically seeded pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs (prompt, init image, control image) for `device`."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast tests for the image-to-image pipeline driven by two ControlNets at once."""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Return tiny components where `controlnet` is a MultiControlNetModel of two nets."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-randomize the zero-initialized ControlNet projection convs so the
            # two ControlNets contribute a non-trivial (and different) signal.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control-guidance windows must produce different images."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test against real pretrained checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        # Loose tolerance: CPU-offloaded fp32/fp16 runs differ slightly across drivers.
        assert np.abs(expected_image - image).max() < 9e-2
| 131 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the image embedder used in stable unCLIP
    and scales image embeddings to / from the normalized space the diffusion model expects.

    The original block assigned the parameters to a throwaway local instead of
    `self.mean` / `self.std` (which the other methods read), and `scale`/`unscale`
    returned the *unmodified* input — both fixed here.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        # Statistics of the embedding distribution, registered as parameters so
        # they move with the module and are serialized in checkpoints.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        """Move/cast the stored statistics; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings to zero mean / unit standard deviation."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert `scale`: map normalized embeddings back to the original space."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 440 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual M-CLIP text encoder.

    Restores the class name `MCLIPConfig` (referenced by the model class below)
    and stores the dimensions on `self` — the original assigned them to a
    throwaway local, so `config.transformerDimensions` / `config.numDims`
    raised AttributeError.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Hidden width of the XLM-R transformer and of the target CLIP image space.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder followed by a linear projection into the CLIP image space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        # The original assigned these to a throwaway local; `forward` reads
        # self.transformer / self.LinearTransformation, so store them on self.
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, per-token embeddings)."""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool token embeddings over non-padding positions only.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 440 | 1 |
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100 ("Arranged probability").

    Find the first disc arrangement with total > `min_total` where drawing two
    blue discs has probability exactly 1/2, and return the number of blue discs.
    Solutions satisfy a Pell-like recurrence; `numerator`/`denominator` track
    2*total-1 and 2*blue-1 of consecutive solutions.

    The original assigned all four state variables to one throwaway name and
    then read the undefined names below; `solution` is the name the __main__
    guard already calls.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    # numerator <= 2*min_total - 1  <=>  total <= min_total, so iterate until
    # the first solution strictly beyond min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
| 253 |
from math import sqrt
def is_prime(number: int) -> bool:
    """
    input: positive integer 'number'
    returns True if 'number' is prime, otherwise False.
    """

    # precondition
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n):
    """
    input: positive integer 'n' > 2
    returns a list of all prime numbers from 2 up to n, computed with the
    sieve of Eratosthenes (composites are zeroed out, then filtered).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n):
    """
    input: positive integer 'n' > 2
    returns a list of all prime numbers from 2 up to n (via trial division).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number):
    """
    input: integer 'number' >= 0
    returns a list of the prime factors of 'number' (with multiplicity);
    0, 1 and primes are returned as a single-element list.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number):
    """
    input: integer 'number' >= 0
    returns the greatest prime factor of 'number'.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number):
    """
    input: integer 'number' >= 0
    returns the smallest prime factor of 'number'.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number):
    """Return True iff 'number' is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number):
    """Return True iff 'number' is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number):
    """
    Goldbach's assumption: input an even integer 'number' > 2,
    returns a list of two prime numbers whose sum equals 'number'.
    """
    # precondition
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    """
    Greatest common divisor of two non-negative integers (Euclid's algorithm).

    The original merged both parameters into one name (a broken signature)
    and read undefined locals inside the loop.
    """
    # precondition
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1, number2):
    """
    Least common multiple of two positive integers
    (German "kleinstes gemeinsames Vielfaches"), built from the prime
    factorizations of both arguments.
    """
    # precondition
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n):
    """
    input: integer 'n' >= 0
    returns the n-th prime number, counting from get_prime(0) == 2.
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1, p_number_2):
    """
    input: two prime numbers 'p_number_1' < 'p_number_2'
    returns a list of all primes strictly between the two arguments.
    """
    # precondition
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """
    input: positive integer 'n' >= 1
    returns all divisors of n in ascending order (1 and 'n' included).
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number):
    """
    input: integer 'number' > 1
    returns True iff 'number' equals the sum of its proper divisors.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """
    input: two integers, denominator != 0
    returns the fraction reduced to lowest terms as (numerator, denominator).
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for integer n >= 0 (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n):
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1 (iterative)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
| 253 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->float:
return base * power(UpperCAmelCase__, (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
A_ = int(input('''Enter the base: ''').strip())
A_ = int(input('''Enter the exponent: ''').strip())
A_ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A_ = 1 / result
print(F'{base} to the power of {exponent} is {result}')
| 498 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
# Module logger; `default_hp_search_backend` below logs through it.
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    """Common interface implemented by every hyperparameter-search backend."""

    # Backend identifier (e.g. "optuna"); set by subclasses.
    name: str
    # pip package to install when it differs from `name`.
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        """Run the hyperparameter search for `trainer`."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default search space for `trial`."""
        raise NotImplementedError

    def ensure_available(self):
        # Fail fast with an actionable install hint if the backend is missing.
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    """Optuna-based hyperparameter search (name grounded by the registry below)."""

    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    """Ray Tune-based hyperparameter search."""

    name = "ray"
    # The installable extra differs from the backend name.
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    """SigOpt-based hyperparameter search."""

    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """Weights & Biases sweep-based hyperparameter search."""

    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
# Registry mapping the HPSearchBackend enum to its implementation class;
# `default_hp_search_backend` below iterates it, so the name must match.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """Return the name of the first installed search backend.

    Logs a note when several backends are available; raises RuntimeError with
    per-backend install instructions when none is installed.
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
| 498 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line to a fixed `max_length` with the given padding side.

    The original signature repeated `__a` for every parameter (a SyntaxError);
    `encode_line` is the name the dataset class below already calls.
    """
    # BART expects add_prefix_space for lines that don't already start with a space.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop trailing/interior columns that are all-padding across the batch.

    Returns the trimmed ids, or a (ids, attention_mask) tuple when a mask is given.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Line-aligned seq2seq dataset reading `<type_path>.source` / `<type_path>.target`
    lazily via linecache (one example per line)."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally cap the dataset size for quick runs.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line (used to reject files with empty lines)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack a list of examples and trim columns that are padding everywhere."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
UpperCamelCase_ : Any = getLogger(__name__)
def A_ (__a ):
    """Flatten one level of nesting: an iterable of iterables -> a flat list."""
    flattened = []
    for chunk in __a:
        flattened.extend(chunk)
    return flattened
def A_ (__a ):
    """Dump git checkout metadata to ``<__a>/git_log.json``.

    Fixes: the original passed the destination folder ``__a`` to ``save_json``
    instead of the collected repo info.
    NOTE(review): ``get_git_info`` and ``save_json`` are defined in this module
    under mangled names (both ``A_``), so these lookups still need a
    module-wide rename to resolve.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(__a, "git_log.json"))
def A_ (content, path, indent=4, **json_dump_kwargs):
    """Serialize *content* as JSON to *path*.

    Fixes: the original signature repeated ``__a`` for every parameter, which
    is a SyntaxError in Python; names restored from the upstream helper.
    """
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def A_ (__a ):
    """Read and deserialize the JSON document stored at path *__a*."""
    with open(__a) as handle:
        return json.load(handle)
def A_ ():
    """Describe the current git checkout: repo id, HEAD sha, branch, hostname.

    Fixes NameError: the original called
    ``git.Repo(search_parent_directories=__a)`` and ``str(__a)`` although the
    function takes no arguments; restored the upstream constants
    (``search_parent_directories=True`` / ``str(repo)``).
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def A_ (fn, x):
    """Eager ``map``: apply *fn* to every element of *x* and return a list.

    Fixes: the original repeated ``__a`` as both parameter names (SyntaxError).
    """
    return list(map(fn, x))
def A_ (obj, path):
    """Pickle *obj* to the binary file at *path*.

    Fixes: the original repeated ``__a`` as both parameter names (SyntaxError).
    """
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def A_ (__a ):
    """SQuAD-style answer normalization: lowercase, strip punctuation, drop
    English articles (a/an/the), collapse whitespace.

    Fixes NameError: the inner helpers read the undefined name ``text``
    instead of their own parameter.
    """
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(__a))))
def A_ (prediction, ground_truth):
    """Token-level F1 between normalized *prediction* and *ground_truth*.

    Fixes: the original repeated ``__a`` as both parameter names (SyntaxError).
    NOTE(review): ``normalize_answer`` is defined above under the mangled name
    ``A_``; this call still needs a module-wide rename to resolve.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        # No token overlap -> F1 is zero (also avoids division by zero below).
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def A_ (prediction, ground_truth):
    """Exact-match after SQuAD normalization.

    Fixes: the original repeated ``__a`` as both parameter names (SyntaxError).
    NOTE(review): ``normalize_answer`` above is mangled to ``A_`` in this file.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def A_ (output_lns, reference_lns):
    """Mean exact-match over aligned prediction/reference line lists.

    Fixes: the original repeated ``__a`` as both parameter names (SyntaxError).
    NOTE(review): ``exact_match_score`` above is mangled to ``A_`` in this file.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def A_ (model_prefix):
    """True iff *model_prefix* names a RAG model.

    Fixes NameError: the parameter was ``__a`` while the body read the
    undefined name ``model_prefix``.
    """
    return model_prefix.startswith("rag")
def A_ (extra_params, hparams, config):
    """Move each name in *extra_params* from *hparams* onto *config*, mapping
    T5's ``dropout`` to ``dropout_rate``; params the config does not know are
    logged and dropped from hparams.

    Fixes: the original repeated ``__a`` three times in the signature
    (SyntaxError) and the body read undefined names; restored from upstream.
    NOTE(review): ``logger`` is bound as ``UpperCamelCase_`` above — the
    logging branch will NameError until that binding is renamed.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 115 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
# Emit INFO-level progress messages during checkpoint conversion.
logging.set_verbosity_info()
# NOTE(review): code below refers to this logger via mangled names; confirm.
UpperCamelCase_ : Any = logging.get_logger(__name__)
def A_ (model_name):
    """Build the ``ASTConfig`` matching *model_name* and attach label maps.

    Fixes: the original read the undefined name ``model_name`` (its parameter
    was ``__a``), dropped every config assignment into a throwaway local, and
    cast the id2label keys with ``int(__a)`` instead of ``int(k)``.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass  # defaults already match the 10-10 models
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def A_ (name):
    """Map an original AST checkpoint key to its HF Transformers key.

    Fixes NameError: the parameter was ``__a`` while the body read ``name``.
    Replacement order matters: ``attn.proj`` must be handled before ``attn``.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def A_ (orig_state_dict, config):
    """Split fused ``qkv`` weights/biases into query/key/value entries.

    Fixes: the original repeated ``__a`` for both parameters (SyntaxError) and
    assigned every rewritten entry to a throwaway local, so popped keys were
    silently dropped.
    NOTE(review): upstream additionally routes non-qkv keys through
    ``rename_key``; that helper's name is mangled in this file, so non-qkv
    keys are kept verbatim here — confirm against the caller's expectations.
    """
    for key in list(orig_state_dict.keys()):
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def A_ (state_dict):
    """Drop the original classification-head entries from *state_dict* in place.

    Fixes NameError: the parameter was ``__a`` while the body used
    ``state_dict``, and ``pop`` was given a bogus default; missing keys are
    now simply ignored via ``pop(k, None)``.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def A_ (model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to HF format, verify the logits on a
    sample input, and optionally save / push the result.

    Fixes: the original repeated ``__a`` three times in the signature
    (SyntaxError) and read undefined names throughout (intermediates were all
    assigned to ``A_``); restored from the upstream conversion script.
    NOTE(review): the helpers above (config builder, remove_keys,
    convert_state_dict) are all mangled to ``A_`` in this file, so the calls
    below still need a module-wide rename to resolve.
    """
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving feature extractor to {pytorch_dump_folder_path}')
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f'MIT/{model_name}')
        feature_extractor.push_to_hub(f'MIT/{model_name}')
if __name__ == "__main__":
    # CLI entry point for the AST checkpoint converter.
    # Fixes NameError: the parser/args were assigned to throwaway mangled
    # names while the following statements read ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function above is mangled to ``A_`` in this
    # file; upstream it is named as called below — confirm after de-mangling.
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 115 | 1 |
"""simple docstring"""
import os
import string
import sys
# FIXME(review): in the original, every module constant was assigned to the
# single mangled name ``__lowercase`` (each overwriting the last) while the
# functions below read ``ARROW_KEY_FLAG``/``KEYMAP``/``WIN_CH_BUFFER``/
# ``WIN_KEYMAP``; the real names (from upstream accelerate's keymap module)
# are restored so the module can work at all.

# Flag OR-ed onto the base codes to mark arrow keys.
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow key-code range consulted by get_char() below.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of pending translated keystrokes on Windows.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def lowerCAmelCase ():
    """Read one raw keypress from stdin (Windows via msvcrt, POSIX via tty).

    Fixes NameError: every assignment in the original went to the throwaway
    local ``__UpperCamelCase`` while later statements read ``ch`` etc.;
    locals restored from upstream accelerate's ``get_raw_chars``.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read raises.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def lowerCAmelCase ():
    """Translate a raw keypress into a key code or printable character.

    Fixes NameError: the original stored the keypress in the throwaway local
    ``__UpperCamelCase`` but then read the undefined name ``char``.
    NOTE(review): both this function and ``get_raw_chars`` above were renamed
    to ``lowerCAmelCase`` in this file, so the calls below still need a
    module-wide rename to resolve correctly.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 296 | """simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( __a ):
    """Abstract base for CLI subcommands: register arguments, then run.

    NOTE(review): the base class ``__a`` is an undefined name at module scope
    (upstream this derives from ABC); also both abstract methods share the
    name ``UpperCAmelCase_``, so the second shadows the first — confirm the
    intended de-mangled names before use.
    """
    @staticmethod
    @abstractmethod
    def UpperCAmelCase_ ( UpperCamelCase__ : ArgumentParser ) -> str:
        '''Register this command's sub-parser and arguments on the parser.'''
        raise NotImplementedError()
    @abstractmethod
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        '''Execute the command.'''
        raise NotImplementedError()
| 296 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __lowerCamelCase ( ) -> Any:
    """Build a tiny 3-row fixture dataset (two near-duplicate files, one distinct).

    Fixes NameError: the dict literal was assigned to a throwaway local while
    ``Dataset.from_dict`` was called with the undefined name ``A__``.
    """
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset
class SCREAMING_SNAKE_CASE_ (a__ ):
    """Tests for minhash deduplication.

    Fixes NameError: the test bodies called ``make_duplicate_clusters(__a, ...)``
    and read ``duplicate_clusters`` without ever binding them.
    NOTE(review): the base class ``a__`` is undefined at module scope (upstream:
    ``TestCase``), ``get_dataset`` above is mangled to ``__lowerCamelCase``, and
    both test methods share the name ``_lowerCAmelCase`` (the second shadows the
    first) — a module-wide de-mangling is still required.
    """

    def _lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
        # One cluster containing both near-duplicate files is expected.
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def _lowerCAmelCase ( self : List[Any] ) ->List[str]:
        ds = get_dataset()
        deduplicated_ds, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(deduplicated_ds), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 278 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class SCREAMING_SNAKE_CASE_ :
    """Config/inputs factory plus per-head shape checks for MobileBERT tests.

    FIXME(review): this class cannot run as written — every multi-parameter
    signature below repeats the name ``__a`` (a SyntaxError in Python), bodies
    assign to the throwaway local ``lowerCamelCase_`` while reading upstream
    names (``parent``, ``batch_size``, ...), and the annotated tuple-unpacking
    at the bottom (``(...) : Optional[Any] = config_and_inputs``) is also a
    SyntaxError.  Code is left byte-identical pending a faithful de-mangling
    against upstream ``tests/models/mobilebert/test_modeling_mobilebert.py``.
    """
    def __init__( self : Optional[Any] , __a : int , __a : str=13 , __a : Tuple=7 , __a : int=True , __a : int=True , __a : Dict=True , __a : str=True , __a : List[str]=99 , __a : Dict=64 , __a : Optional[Any]=32 , __a : List[Any]=5 , __a : Optional[int]=4 , __a : str=37 , __a : str="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : int=512 , __a : Optional[Any]=16 , __a : Any=2 , __a : Dict=0.02 , __a : str=3 , __a : List[Any]=4 , __a : List[str]=None , ) ->Optional[int]:
        lowerCamelCase_ : Dict = parent
        lowerCamelCase_ : Optional[Any] = batch_size
        lowerCamelCase_ : Any = seq_length
        lowerCamelCase_ : Union[str, Any] = is_training
        lowerCamelCase_ : int = use_input_mask
        lowerCamelCase_ : int = use_token_type_ids
        lowerCamelCase_ : int = use_labels
        lowerCamelCase_ : List[str] = vocab_size
        lowerCamelCase_ : Any = hidden_size
        lowerCamelCase_ : Any = embedding_size
        lowerCamelCase_ : int = num_hidden_layers
        lowerCamelCase_ : Union[str, Any] = num_attention_heads
        lowerCamelCase_ : Optional[Any] = intermediate_size
        lowerCamelCase_ : Optional[int] = hidden_act
        lowerCamelCase_ : Dict = hidden_dropout_prob
        lowerCamelCase_ : Any = attention_probs_dropout_prob
        lowerCamelCase_ : Optional[int] = max_position_embeddings
        lowerCamelCase_ : Dict = type_vocab_size
        lowerCamelCase_ : List[str] = type_sequence_label_size
        lowerCamelCase_ : Any = initializer_range
        lowerCamelCase_ : Union[str, Any] = num_labels
        lowerCamelCase_ : List[Any] = num_choices
        lowerCamelCase_ : Dict = scope
    # Builds random ids/masks/labels for one batch (upstream: prepare_config_and_inputs).
    def _lowerCAmelCase ( self : int ) ->str:
        lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Tuple = None
        if self.use_input_mask:
            lowerCamelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : List[Any] = None
        if self.use_token_type_ids:
            lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase_ : List[Any] = None
        lowerCamelCase_ : Tuple = None
        lowerCamelCase_ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Small MobileBertConfig for fast tests (upstream: get_config).
    def _lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
    # One check per head below; each verifies output shapes for its model class.
    def _lowerCAmelCase ( self : Tuple , __a : int , __a : List[str] , __a : Tuple , __a : Any , __a : Union[str, Any] , __a : int , __a : Any ) ->Tuple:
        lowerCamelCase_ : Tuple = MobileBertModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : Dict = model(__a , attention_mask=__a , token_type_ids=__a )
        lowerCamelCase_ : Optional[int] = model(__a , token_type_ids=__a )
        lowerCamelCase_ : Tuple = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def _lowerCAmelCase ( self : List[str] , __a : str , __a : Union[str, Any] , __a : int , __a : List[Any] , __a : Dict , __a : List[Any] , __a : List[str] ) ->Tuple:
        lowerCamelCase_ : List[Any] = MobileBertForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Tuple , __a : Dict , __a : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict ) ->int:
        lowerCamelCase_ : Tuple = MobileBertForNextSentencePrediction(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : Any = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def _lowerCAmelCase ( self : str , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Dict ) ->List[Any]:
        lowerCamelCase_ : Optional[int] = MobileBertForPreTraining(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : List[Any] = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def _lowerCAmelCase ( self : List[str] , __a : Tuple , __a : int , __a : Optional[Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[Any] , __a : Any ) ->List[str]:
        lowerCamelCase_ : Dict = MobileBertForQuestionAnswering(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : Tuple = model(
            __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _lowerCAmelCase ( self : Optional[Any] , __a : List[Any] , __a : str , __a : int , __a : Dict , __a : Dict , __a : List[Any] , __a : str ) ->Tuple:
        lowerCamelCase_ : Dict = self.num_labels
        lowerCamelCase_ : Optional[Any] = MobileBertForSequenceClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : Optional[int] , __a : int , __a : str , __a : str , __a : Optional[Any] ) ->Tuple:
        lowerCamelCase_ : int = self.num_labels
        lowerCamelCase_ : List[str] = MobileBertForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _lowerCAmelCase ( self : Union[str, Any] , __a : Any , __a : Tuple , __a : Dict , __a : Dict , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) ->List[str]:
        lowerCamelCase_ : Any = self.num_choices
        lowerCamelCase_ : int = MobileBertForMultipleChoice(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Tuple = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Upstream: prepare_config_and_inputs_for_common (annotated tuple target below is invalid syntax).
    def _lowerCAmelCase ( self : List[str] ) ->int:
        lowerCamelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) : Optional[Any] = config_and_inputs
        lowerCamelCase_ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ (a__ , a__ , unittest.TestCase ):
    """Common + pipeline test suite for the MobileBERT model family.

    FIXME(review): the mixin bases ``a__`` are a single mangled name
    (upstream: ModelTesterMixin, PipelineTesterMixin) and are undefined at
    module scope; ``_prepare_for_class`` repeats ``__a`` in its signature
    (SyntaxError), and the test bodies read names (``model_tester``,
    ``config_and_inputs``) that the mangled assignments never bind.  Code is
    left byte-identical pending a faithful de-mangling.
    """
    _a = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _a = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _a = True
    # Adds dummy labels for pretraining-style model classes (upstream: _prepare_for_class).
    def _lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : Dict , __a : str=False ) ->Any:
        lowerCamelCase_ : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a )
        if return_labels:
            if model_class in get_values(__a ):
                lowerCamelCase_ : str = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
                lowerCamelCase_ : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__a )
        return inputs_dict
    def _lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
        lowerCamelCase_ : List[Any] = MobileBertModelTester(self )
        lowerCamelCase_ : int = ConfigTester(self , config_class=__a , hidden_size=37 )
    def _lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
        self.config_tester.run_common_tests()
    # Each test below delegates to the tester class's per-head check.
    def _lowerCAmelCase ( self : str ) ->Any:
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*__a )
    def _lowerCAmelCase ( self : List[str] ) ->Tuple:
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a )
    def _lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a )
    def _lowerCAmelCase ( self : Any ) ->List[Any]:
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a )
    def _lowerCAmelCase ( self : Optional[int] ) ->str:
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*__a )
    def _lowerCAmelCase ( self : str ) ->Optional[Any]:
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*__a )
    def _lowerCAmelCase ( self : List[str] ) ->int:
        lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a )
    def _lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
        lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*__a )
def __lowerCamelCase ( A__ ):
    """Wrap a nested list of token ids in a ``torch.long`` tensor on the test
    device.

    Fixes: the original passed the data ``A__`` as the ``device=`` argument
    too; upstream uses the imported ``torch_device``.
    """
    return torch.tensor(A__, dtype=torch.long, device=torch_device)


# Aliases under the upstream names so the integration test below (which reads
# ``_long_tensor`` and ``TOLERANCE``) can resolve them despite the mangling.
_long_tensor = __lowerCamelCase
snake_case__ : List[str] = 1e-3
TOLERANCE = snake_case__
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
    """Slow integration test: checks a slice of MobileBERT hidden states
    against reference values via a ratio-bounds comparison.

    NOTE(review): ``_long_tensor`` and ``TOLERANCE`` are read below but were
    mangled to ``__lowerCamelCase``/``snake_case__`` above — confirm aliases
    exist before running.
    """
    @slow
    def _lowerCAmelCase ( self : List[Any] ) ->List[str]:
        lowerCamelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(__a )
        lowerCamelCase_ : int = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            lowerCamelCase_ : Optional[Any] = model(__a )[0]
        lowerCamelCase_ : Any = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , __a )
        lowerCamelCase_ : str = torch.tensor(
            [
                [
                    [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
                    [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
                    [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
                ]
            ] , device=__a , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lowerCamelCase_ : str = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        lowerCamelCase_ : Tuple = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 278 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    """Fast pipeline tests for Stable Diffusion InstructPix2Pix.

    NOTE(review): the three ``lowerCamelCase_`` bases are one mangled name
    (upstream: the pipeline tester mixins imported above) and are undefined at
    module scope; likewise all five class attributes below were mangled to the
    same name ``__UpperCAmelCase``, so each assignment shadows the previous
    one — confirm the de-mangled attribute names before running.
    """
    # Pipeline class under test, followed by its parameter sets.
    __UpperCAmelCase = StableDiffusionInstructPixaPixPipeline
    __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    __UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=8 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=snake_case )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
SCREAMING_SNAKE_CASE =CLIPTextModel(snake_case )
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ,snake_case : Optional[Any]=0 ):
SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 32, 32) ,rng=random.Random(snake_case ) ).to(snake_case )
SCREAMING_SNAKE_CASE =image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' )
if str(snake_case ).startswith('mps' ):
SCREAMING_SNAKE_CASE =torch.manual_seed(snake_case )
else:
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(snake_case )
SCREAMING_SNAKE_CASE ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def test_stable_diffusion_pix2pix_default_case(self):
    """Smoke-test the pipeline end to end on CPU against a pinned slice."""
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
    sd_pipe = sd_pipe.to(device)
    # NOTE(review): mangled source passed an undefined name; disable=None
    # matches the upstream test — confirm.
    sd_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    image = sd_pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_negative_prompt(self):
    """Verify the negative_prompt path produces the pinned output slice."""
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
    sd_pipe = sd_pipe.to(device)
    sd_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    negative_prompt = "french fries"
    output = sd_pipe(**inputs, negative_prompt=negative_prompt)
    image = output.images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_multiple_init_images(self):
    """Batch of two prompts/images should yield a (2, 32, 32, 3) output.

    The mangled source assigned every value to one reused name, dropping the
    ``inputs[...] = ...`` writes; restored from the use-sites and the upstream
    test — the batched prompt and image must be written back into ``inputs``.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
    sd_pipe = sd_pipe.to(device)
    sd_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    inputs["prompt"] = [inputs["prompt"]] * 2
    # ``np.floataa`` was a mangled ``np.float32``.
    image = np.array(inputs["image"]).astype(np.float32) / 255.0
    image = torch.from_numpy(image).unsqueeze(0).to(device)
    image = image / 2 + 0.5
    image = image.permute(0, 3, 1, 2)
    inputs["image"] = image.repeat(2, 1, 1, 1)
    image = sd_pipe(**inputs).images
    image_slice = image[-1, -3:, -3:, -1]
    assert image.shape == (2, 32, 32, 3)
    expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_euler(self):
    """Run with EulerAncestralDiscreteScheduler and check the pinned slice.

    Removed leftover debug lines that rounded the slice into a local named
    ``slice`` (shadowing the builtin) and printed the wrong variable.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    components["scheduler"] = EulerAncestralDiscreteScheduler(
        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
    )
    sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
    sd_pipe = sd_pipe.to(device)
    sd_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    image = sd_pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_inference_batch_single_identical(self):
    """Widen the mixin's batch-vs-single tolerance for this pipeline.

    Must carry this exact name: it overrides (and delegates to) the mixin
    method it calls via ``super()``; the mangled name made it dead code.
    """
    super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def test_latents_input(self):
    """Passing pre-encoded VAE latents must match passing the raw image."""
    components = self.get_dummy_components()
    pipe = StableDiffusionInstructPixaPixPipeline(**components)
    # Bypass resize/normalize so tensors round-trip exactly; the mangled
    # source passed undefined names here (upstream: both False — confirm).
    pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
    # NOTE(review): upstream uses the module-level ``torch_device``; "cpu" is
    # the safe equivalent given the imports visible here.
    pipe = pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)
    out = pipe(**self.get_dummy_inputs_by_type("cpu", input_image_type="pt"))[0]
    vae = components["vae"]
    inputs = self.get_dummy_inputs_by_type("cpu", input_image_type="pt")
    for image_param in self.image_latents_params:
        if image_param in inputs.keys():
            inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
    out_latents_inputs = pipe(**inputs)[0]
    max_diff = np.abs(out - out_latents_inputs).max()
    self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class a_(unittest.TestCase):
    """Slow GPU integration tests against the published instruct-pix2pix weights.

    Every method in the mangled source shared one name (each definition
    shadowed the previous one) and ``self.get_inputs()`` did not exist;
    names restored from call sites and content. ``torch_device`` below is
    the transformers testing helper imported at module top in the upstream
    file — TODO confirm the import survives in this copy.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Shared call kwargs: a fixed example image and a seeded generator."""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """Assert the callback fires each step with the expected latents."""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        # ``torch.floataa`` was a mangled ``torch.float16`` (AttributeError).
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 252 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
class a_(PretrainedConfig):
    """Composite configuration wrapping an encoder config and a decoder config.

    NOTE(review): the mangled source inherited from the undefined name
    ``lowerCamelCase_`` while ``PretrainedConfig`` was imported and unused;
    the latter is the only plausible base. The two class attributes shared
    one mangled name (the second shadowed the first); ``model_type`` /
    ``is_composition`` are what the config machinery reads.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): assert-based validation is stripped under ``python -O``;
        # kept as-is to preserve the exception type callers may expect.
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        # Local import avoids a circular dependency with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Build a composite config from two sub-configs, marking the decoder."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs.

        Renamed to ``to_dict``: the body is the canonical ``to_dict`` override
        (deep-copies ``__dict__``, expands sub-configs, stamps ``model_type``)
        and was dead code under its mangled name.
        """
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 252 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the mangled source bound "platform" to a dead local; the
    # comments above describe the XLA allocator env var set upstream — confirm.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds tiny Pegasus configs/inputs and shared use-cache checks.

    Renamed from the mangled ``A``: the test class below instantiates
    ``FlaxPegasusModelTester(self)``, which otherwise does not exist.
    The three class attributes shared one mangled name; ``config_cls`` /
    ``config_updates`` are read by the body, ``hidden_act`` is conventional.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        # The mangled signature reused one parameter name sixteen times (a
        # SyntaxError); names restored from the attribute assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict); name proven by the test-class call site."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached single-step decoding must match the uncached forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above but with an explicit, zero-padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Assemble the model input dict, deriving masks from the pad token.

    Renamed from the mangled ``a_``: the tester above calls
    ``prepare_pegasus_inputs_dict(...)``. The mangled signature reused one
    parameter name five times (a SyntaxError); ``np.inta`` was ``np.int8``.

    Args:
        config: any object with a ``pad_token_id`` attribute.
        input_ids / decoder_input_ids: integer arrays of token ids.
        attention_mask / decoder_attention_mask: optional precomputed masks;
            derived (non-pad positions = 1) when ``None``. The decoder mask's
            first position is forced to 1 (BOS is never masked).
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class A(FlaxModelTesterMixin, unittest.TestCase):
    """Flax Pegasus model tests.

    NOTE(review): the mangled source inherited from the undefined name
    ``UpperCamelCase_``; ``FlaxModelTesterMixin`` is imported above and
    provides ``_prepare_for_class`` used below. Class-attribute and method
    names restored from call sites and upstream conventions; the four
    boolean flag names are reconstructed — confirm against the mixin.
    """

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """Jitted and un-jitted encode must agree on output shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """Jitted and un-jitted decode must agree on output shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
            ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
            ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
        ]
        tgt_text = [
            'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
            'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
        ]
        inputs = tokenizer(src_text, return_tensors='np', truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 464 | def _lowerCamelCase ( snake_case ):
assert (
isinstance(snake_case , snake_case ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
_lowerCAmelCase , _lowerCAmelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_lowerCAmelCase , _lowerCAmelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 192 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs/inputs and shared model checks.

    Renamed from the mangled ``_a``: the test class below instantiates
    ``TFResNetModelTester(self)``, which otherwise does not exist.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — read-only, matches upstream
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        # The mangled signature reused one parameter name (a SyntaxError);
        # names restored from the attribute assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # Read by test_hidden_states_output as ``model_tester.num_stages``.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _a(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """TF ResNet model tests.

    NOTE(review): the mangled source inherited twice from the undefined
    name ``_lowerCamelCase``; the two mixins imported above are the only
    plausible bases. Class-attribute and method names restored from call
    sites; the five boolean flag names are reconstructed from upstream
    conventions — confirm against the mixins.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op; name proven by the call in test_config above.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test fixture image used by the integration tests.

    Named `prepare_img` because the integration test below calls it by that name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against a published TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Named `default_image_processor` because the test below reads this property.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 712 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT (slow and fast tokenizers).

    All attribute/method names restored to distinct identifiers: in the previous
    version every method shared one name, so only the last definition survived and
    unittest discovered no tests at all.
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocabulary for the offline tokenizer tests.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Default strip_accents behaves like strip_accents=True when lower-casing.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 101/102 are the [CLS]/[SEP] ids of the pretrained vocab.
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 0 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
snake_case = get_logger(__name__)
class ExtractManager:
    """Extracts compressed archives into a cache directory, reusing prior extractions."""

    def __init__(self, cache_dir: Optional[str] = None):
        # Instance attributes (the methods below read self.extract_dir / self.extractor).
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        """Return the cache path for the extraction of `path` (hash of the original path)."""
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when nothing usable exists at the output location.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract `input_path` if it is a known archive format; return the extracted path.

        Returns `input_path` unchanged when the format cannot be inferred.
        """
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    """Abstract interface implemented by all archive extractors."""

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        """Return True if `path` looks like an archive this extractor can handle."""
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """Extract the archive at `input_path` to `output_path`."""
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base class for extractors that recognize their format from leading magic bytes."""

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        """Read the first `magic_number_length` bytes of the file at `path`."""
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            # Read just enough bytes to compare against the longest known signature.
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """Extractor for tar archives, with path-traversal and bad-link protection."""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, path):
        """Yield only tar members that stay inside `path` (blocks '../' escapes and bad links)."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    """Extractor for single-file gzip streams."""

    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    """Extractor for ZIP archives, with a stricter detection than zipfile.is_zipfile."""

    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    """Extractor for single-file xz (LZMA) streams."""

    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    """Extractor for RAR archives (requires the optional `rarfile` package)."""

    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    """Extractor for Zstandard streams (requires the optional `zstandard` package)."""

    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class BzipaExtractor(MagicNumberBaseExtractor):
    """Extractor for single-file bzip2 streams."""

    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # Use the stdlib bz2 module; the previous `bza` name is not an importable module.
        import bz2

        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    """Extractor for 7z archives (requires the optional `py7zr` package)."""

    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        # The package installed by `pip install py7zr` is imported as `py7zr` (not `pyazr`).
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class LzaExtractor(MagicNumberBaseExtractor):
    """Extractor for LZ4 frame files (requires the optional `lz4` package)."""

    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        # The package installed by `pip install lz4` is imported as `lz4.frame` (not `lza.frame`).
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """Dispatches extraction to the right extractor based on the detected archive format."""

    #  Extractors are tried in insertion order when inferring the format.
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        # Longest signature among all magic-number-based extractors.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        """Deprecated; use `infer_extractor_format` instead."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        """Return the format key ("tar", "zip", ...) for `path`, or None if unrecognized."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        """Extract `input_path` to `output_path` using `extractor_format` (preferred) or a deprecated `extractor`."""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 62 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__UpperCAmelCase = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.  (The pipeline below instantiates this class and calls `_parse_labels`.)
    """

    def _parse_labels(self, labels):
        # Accept either a list of labels or a single comma-separated string.
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(sequences) == 0 or len(labels) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        # One (premise, hypothesis) pair per (sequence, label) combination.
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each candidate label is posed as a hypothesis and
    the model scores entailment of each (sequence, hypothesis) pair.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        # Index of the "entailment" class in the model's label2id mapping; -1 if not found.
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Tokenize the (sequence, hypothesis) pairs for the model."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        """Classify `sequences` against candidate labels (passable positionally or as a kwarg)."""
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
} | 642 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix counting up from 1 row by row.

    Non-positive sizes are normalized: abs() is taken, and 0 falls back to 4.
    (Original body assigned the clamped size to a junk name, then used the
    undefined `row_size`; the `__main__` block calls this as `make_matrix`.)
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise, returning a new matrix.

    Equivalent to reverse_row(transpose(matrix)); inlined here so the function
    is self-contained (the original called helpers whose names had been
    destroyed by obfuscation).
    """
    # OR.. transpose(reverse_column(matrix))
    transposed = [list(row) for row in zip(*matrix)]
    return transposed[::-1]
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees, returning a new matrix.

    Equivalent to reverse_row(reverse_column(matrix)); inlined to be
    self-contained.
    """
    # OR.. reverse_column(reverse_row(matrix))
    return [row[::-1] for row in matrix][::-1]
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (= 90 clockwise).

    Equivalent to reverse_column(transpose(matrix)); inlined to be
    self-contained.
    """
    # OR.. transpose(reverse_row(matrix))
    return [list(row)[::-1] for row in zip(*matrix)]
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of `matrix` as a new list of row lists.

    The original built `list(matrix)` per zipped column (wrong value) and then
    returned the undefined name `matrix`; fixed to transpose each zipped row.
    """
    return [list(row) for row in zip(*matrix)]
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its rows in reverse order (vertical flip)."""
    return matrix[::-1]
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed (horizontal flip)."""
    return [row[::-1] for row in matrix]
def print_matrix(matrix: list[list[int]]) -> None:
    """Print each row on its own line, values separated by spaces.

    The original iterated `matrix` (an undefined name; the parameter had been
    renamed) and printed the whole argument per row; fixed to print each row.
    """
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    # Demo: a fresh matrix is built before each rotation so "origin" always
    # shows the unrotated matrix. (The original assigned to `lowercase` and
    # called `rotate_aa`/`rotate_aaa` — names that never existed; restored to
    # the functions defined above.)
    matrix = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 90 counterclockwise:\n""")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 180:\n""")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 270 counterclockwise:\n""")
    print_matrix(rotate_270(matrix))
| 591 | import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase(ProcessorMixin):
    """LayoutLMv3 processor: combines a LayoutLMv3 image processor (document OCR +
    pixel values) with a LayoutLMv3 tokenizer into a single processor.

    When the image processor runs OCR (`apply_ocr=True`), the recognized words and
    boxes are fed to the tokenizer; otherwise the caller must supply them.
    NOTE(review): base class and the attribute/method names below were restored
    from the in-file import of `ProcessorMixin` and from use sites (e.g.
    `self.get_overflowing_images` is called in `__call__`); the obfuscated
    original used undefined/colliding names.
    """

    # ProcessorMixin wiring: which sub-components exist and which classes load them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Back-compat shim for the pre-v5 `feature_extractor` argument.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Process document images (and optionally text/boxes/labels) into a
        `BatchEncoding` carrying both token inputs and `pixel_values`."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # The model consumes token ids, layout boxes, the attention mask and image pixels.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 591 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds small random MRA configs/inputs and per-head checks for the tests below.

    NOTE(review): the class is referenced as `MraModelTester` in the test class'
    `setUp`; the obfuscated original named it `lowerCamelCase` and dropped every
    `self.*` assignment and duplicated every parameter name (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Pipelines need a slightly larger vocabulary than the tiny test default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each sample once per choice: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-suite tests for the MRA model family.

    NOTE(review): the mixin base and the `all_model_classes`/`test_*` flag names
    were restored from the file's imports and the `ModelTesterMixin` contract;
    the obfuscated original bound every attribute to the same junk name.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False  # MRA does not output attentions
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """Slow integration tests pinning reference outputs of released MRA checkpoints."""

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 27 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_(unittest.TestCase, ToolTesterMixin):
    """Tests for the `text-to-speech` agent tool (SpeechT5-backed).

    NOTE(review): the mixin base and method names were restored from the
    in-file `ToolTesterMixin` import and unittest conventions; the original
    collided all three methods on the name `__A` and never assigned
    `self.tool`/`result`/`resulting_tensor`.
    """

    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 664 | 0 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative Linear layer of `model` to inspect its quantized weights.

    GPT-2 exposes `c_fc`; the Bloom-style fallback uses `dense_4h_to_h` (the
    obfuscated original had the nonexistent `dense_ah_to_h`). The function is
    called by this name in the memory-footprint test below.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear `module` with a small trainable low-rank adapter.

    The adapter is a rank-`rank` bottleneck whose output projection is
    zero-initialized, so at construction time the layer behaves exactly like
    the wrapped module. Referenced as `LoRALayer(..., rank=16)` in the
    training test below; the obfuscated original never assigned
    `self.module`/`self.adapter` and duplicated its parameter names.
    """

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # Scaled-down normal init for the down-projection; zero for the up-projection.
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        # Base output plus (initially zero) adapter correction.
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """Shared constants/tokenizer setup for the 4-bit quantization tests.

    NOTE(review): attribute names restored from their `self.*` use sites in
    the subclasses (`self.model_name`, `self.EXPECTED_RELATIVE_DIFFERENCE`,
    `self.input_text`, `self.EXPECTED_OUTPUTS`, `self.MAX_NEW_TOKENS`).
    """

    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    """Core 4-bit quantization tests (config serialization, memory, generation,
    forbidden conversions). Mangled identifiers restored: `torch.float16`,
    `torch.uint8`, `load_in_4bit`, `Params4bit`, `T5PreTrainedModel`."""

    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # Saving 4-bit quantized weights is unsupported and must raise.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        # Layers listed in `_keep_in_fp32_modules` (T5's `wo`) must stay fp32.
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    """T5-specific 4-bit tests (with and without `_keep_in_fp32_modules`)."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        # Temporarily disable the fp32-module override, restore it at the end.
        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    """Check 4-bit loading across different head classes (base/seq-cls/causal/seq2seq)."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    """Smoke-test the text-generation pipeline on a 4-bit model."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    """4-bit loading balanced across two GPUs."""

    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    """Train LoRA adapters on top of a frozen 4-bit model and check gradients flow."""

    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
    """Large-checkpoint variant of the inherited test scenario.

    NOTE(review): both class attributes carry the same obfuscated name
    ``A__`` (presumably ``model_name`` and an expected metric value), so the
    second binding shadows the first as written.
    """

    A__ = """gpt2-xl"""
    A__ = 3.3191_8548_5415_2187
# ===== dataset row separator (end of source example) =====
"""simple docstring"""
def _lowerCAmelCase ( dist: list , v: int ) -> None:
    """Pretty-print the ``v`` x ``v`` shortest-path matrix produced by the
    Floyd-Warshall algorithm.

    Finite distances are printed as tab-separated integers; unreachable
    pairs (``float("inf")``) are printed as ``INF``.

    Bug fixed: the original signature declared the same (obfuscated)
    parameter name twice — a ``SyntaxError`` — while the body read the
    undefined names ``dist`` and ``v``; the parameters are restored to
    those names.
    """
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float("inf" ):
                print(int(dist[i][j] ), end="\t" )
            else:
                print("INF", end="\t" )
        # Newline after each matrix row.
        print()
def _lowerCAmelCase ( graph: list , v: int ) -> tuple:
    """Run the Floyd-Warshall all-pairs shortest-path algorithm.

    :param graph: ``v`` x ``v`` adjacency matrix where ``graph[i][j]`` is
        the edge weight from ``i`` to ``j`` and ``float("inf")`` means
        "no edge".
    :param v: number of vertices.
    :return: ``(dist, v)`` where ``dist[i][j]`` is the shortest distance
        from ``i`` to ``j``.  The matrix is also printed.

    Bugs fixed: the original signature declared the same (obfuscated)
    parameter name twice — a ``SyntaxError`` — and the final call targeted
    the undefined name ``_print_dist``; printing is now done by a local
    helper that produces the same output format.
    """

    def _show(matrix: list, n: int) -> None:
        # Same rendering as the module's printing helper.
        print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
        for i in range(n ):
            for j in range(n ):
                if matrix[i][j] != float("inf" ):
                    print(int(matrix[i][j] ), end="\t" )
                else:
                    print("INF", end="\t" )
            print()

    # dist[i][j] starts as the direct edge weight (inf when no edge).
    dist = [[float("inf" ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float("inf" )
                    and dist[k][j] != float("inf" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _show(dist, v )
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read a graph edge by edge, then run Floyd-Warshall.
    # NOTE(review): obfuscation broke this driver — the two helpers above
    # were renamed ``_lowerCAmelCase`` yet ``floyd_warshall`` is called, and
    # every input is bound to a throw-away ``lowercase_`` local while
    # ``v``, ``e``, ``graph`` and ``weight`` are read but never bound.
    lowercase_ : Tuple = int(input('''Enter number of vertices: '''))
    lowercase_ : List[Any] = int(input('''Enter number of edges: '''))

    # Adjacency matrix initialised to "no edge" everywhere.
    lowercase_ : Optional[Any] = [[float('''inf''') for i in range(v)] for j in range(v)]

    for i in range(v):
        # Distance from a vertex to itself is zero.
        lowercase_ : Tuple = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        lowercase_ : str = int(input('''Enter source:'''))
        lowercase_ : Optional[Any] = int(input('''Enter destination:'''))
        lowercase_ : Union[str, Any] = float(input('''Enter weight:'''))
        lowercase_ : str = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
# ===== dataset row separator (end of source example) =====
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase__ : int = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
# NOTE(review): every constant below rebinds the same obfuscated name
# ``lowerCamelCase__`` (originally the logger, the config map and the two
# suppress-token lists each had distinct names), so only the last binding
# survives at module level as written.
lowerCamelCase__ : int = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}

# fmt: off
# Token ids to suppress at generation time (first list).
lowerCamelCase__ : Dict = [
    1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
    2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
    6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
    7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
    1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
    4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
    1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
    1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
    3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
# Token ids whose generation is suppressed only at the beginning (second list).
lowerCamelCase__ : str = [
    1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
    2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
    6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
    8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
    3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
    7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
    1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
    2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
    4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class _snake_case ( UpperCAmelCase_ ):
    """Configuration class for Whisper speech-to-text models.

    NOTE(review): obfuscation broke this class — ``__init__`` declares the
    same parameter name ``SCREAMING_SNAKE_CASE_`` for every argument (a
    ``SyntaxError``), and each ``lowercase__ : T = value`` line below was
    presumably ``self.<name> = value``; as written no attribute is stored
    and the ``super().__init__`` keyword values are all the same name.
    """

    # Model type identifier and serialization hints for PretrainedConfig.
    __lowerCAmelCase : List[str] = 'whisper'
    __lowerCAmelCase : str = ['past_key_values']
    # Map generic config attribute names onto Whisper-specific ones.
    __lowerCAmelCase : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , SCREAMING_SNAKE_CASE_=5_18_65 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=5_02_57 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=15_00 , SCREAMING_SNAKE_CASE_=4_48 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[2_20, 5_02_56] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , ):
        """Store architecture hyper-parameters and forward the special token
        ids to the base ``PretrainedConfig``."""
        lowercase__ : str = vocab_size
        lowercase__ : Tuple = num_mel_bins
        lowercase__ : int = d_model
        lowercase__ : str = encoder_layers
        lowercase__ : str = encoder_attention_heads
        lowercase__ : Any = decoder_layers
        lowercase__ : Optional[int] = decoder_attention_heads
        lowercase__ : List[str] = decoder_ffn_dim
        lowercase__ : Dict = encoder_ffn_dim
        lowercase__ : Dict = dropout
        lowercase__ : Dict = attention_dropout
        lowercase__ : List[str] = activation_dropout
        lowercase__ : List[str] = activation_function
        lowercase__ : Dict = init_std
        lowercase__ : int = encoder_layerdrop
        lowercase__ : List[str] = decoder_layerdrop
        lowercase__ : List[str] = use_cache
        lowercase__ : Tuple = encoder_layers
        lowercase__ : List[Any] = scale_embedding  # scale factor will be sqrt(d_model) if True
        lowercase__ : int = max_source_positions
        lowercase__ : Tuple = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        lowercase__ : Tuple = classifier_proj_size
        lowercase__ : Optional[int] = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowercase__ : Optional[int] = apply_spec_augment
        lowercase__ : Any = mask_time_prob
        lowercase__ : str = mask_time_length
        lowercase__ : int = mask_time_min_masks
        lowercase__ : List[Any] = mask_feature_prob
        lowercase__ : List[Any] = mask_feature_length
        lowercase__ : Optional[int] = mask_feature_min_masks
        lowercase__ : Dict = median_filter_width
        super().__init__(
            pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class _snake_case ( UpperCAmelCase_ ):
    """ONNX export configuration for Whisper (seq2seq, optional past).

    NOTE(review): garbled by obfuscation — intermediate results are bound to
    throw-away ``lowercase__`` locals while the code later reads the
    original names (``common_inputs``, ``encoder_inputs``,
    ``decoder_inputs``, ``dummy_inputs``), and ``generate_dummy_inputs``
    repeats the parameter name ``SCREAMING_SNAKE_CASE_`` — a ``SyntaxError``
    as written.
    """

    @property
    def lowercase__ ( self):
        """Declare the ONNX input axes: audio features, decoder ids, and
        past key-values when ``use_past`` is enabled."""
        lowercase__ : int = OrderedDict(
            [
                ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
            ])
        if self.use_past:
            lowercase__ : List[str] = {0: """batch"""}
        else:
            lowercase__ : int = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""")
        return common_inputs

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 2_20_50 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 2_20 , ):
        """Produce dummy encoder audio features and decoder ids for tracing
        the model during ONNX export."""
        lowercase__ : str = OrderedDict()
        lowercase__ : List[Any] = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , )
        lowercase__ : Dict = encoder_inputs["""input_features"""].shape[2]
        lowercase__ : List[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
        lowercase__ : Optional[Any] = super().generate_dummy_inputs(
            preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
        lowercase__ : List[str] = encoder_inputs.pop("""input_features""")
        lowercase__ : Optional[Any] = decoder_inputs.pop("""decoder_input_ids""")
        if "past_key_values" in decoder_inputs:
            lowercase__ : Dict = decoder_inputs.pop("""past_key_values""")
        return dummy_inputs

    @property
    def lowercase__ ( self):
        """Absolute tolerance used when validating the exported model."""
        return 1E-3
# ===== dataset row separator (end of source example) =====
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCAmelCase_ ( nn.Module ):
    """Flax 2D cross-attention down-block: ``num_layers`` (ResNet,
    Transformer2D) pairs followed by an optional downsampler.

    NOTE(review): obfuscation destroyed this module — the dataclass field
    declarations all collapsed to ``__UpperCAmelCase =42`` and every
    binding in ``setup``/``__call__`` targets the throw-away local ``_A``
    (presumably ``self.resnets = resnets`` etc.); ``__call__`` also repeats
    the parameter name ``_UpperCamelCase`` (a ``SyntaxError``).  Left
    byte-identical apart from comments — too garbled for a safe rewrite.
    """

    __UpperCAmelCase =42
    __UpperCAmelCase =42
    __UpperCAmelCase =0.0
    __UpperCAmelCase =1
    __UpperCAmelCase =1
    __UpperCAmelCase =True
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =jnp.floataa

    def UpperCamelCase ( self )-> int:
        """Build the per-layer resnet and attention submodules."""
        _A = []
        _A = []
        for i in range(self.num_layers ):
            # First layer maps in_channels -> out_channels; the rest keep out_channels.
            _A = self.in_channels if i == 0 else self.out_channels
            _A = FlaxResnetBlockaD(
                in_channels=_UpperCamelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(_UpperCamelCase )
            _A = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(_UpperCamelCase )
        _A = resnets
        _A = attentions
        if self.add_downsample:
            _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True )-> Dict:
        """Apply resnet+attention pairs, collecting each intermediate state,
        then optionally downsample."""
        _A = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            _A = resnet(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
            _A = attn(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
            output_states += (hidden_states,)
        if self.add_downsample:
            _A = self.downsamplers_a(_UpperCamelCase )
            output_states += (hidden_states,)
        return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
    """Flax 2D down-block without attention: a stack of ResNet blocks plus
    an optional downsampler.

    NOTE(review): garbled by obfuscation — field declarations collapsed to
    ``__UpperCAmelCase =42`` and bindings target the throw-away local
    ``_A`` (presumably ``self.resnets = resnets`` etc.); ``__call__``
    repeats the parameter name ``_UpperCamelCase`` (a ``SyntaxError``).
    """

    __UpperCAmelCase =42
    __UpperCAmelCase =42
    __UpperCAmelCase =0.0
    __UpperCAmelCase =1
    __UpperCAmelCase =True
    __UpperCAmelCase =jnp.floataa

    def UpperCamelCase ( self )-> Optional[Any]:
        """Build the stack of ResNet submodules."""
        _A = []
        for i in range(self.num_layers ):
            # First layer maps in_channels -> out_channels; the rest keep out_channels.
            _A = self.in_channels if i == 0 else self.out_channels
            _A = FlaxResnetBlockaD(
                in_channels=_UpperCamelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(_UpperCamelCase )
        _A = resnets
        if self.add_downsample:
            _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True )-> Union[str, Any]:
        """Apply each ResNet, collecting intermediate states, then
        optionally downsample."""
        _A = ()
        for resnet in self.resnets:
            _A = resnet(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
            output_states += (hidden_states,)
        if self.add_downsample:
            _A = self.downsamplers_a(_UpperCamelCase )
            output_states += (hidden_states,)
        return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
    """Flax 2D cross-attention up-block: consumes skip connections from the
    matching down-block, applies (ResNet, Transformer2D) pairs, then an
    optional upsampler.

    NOTE(review): garbled by obfuscation — field declarations collapsed to
    ``__UpperCAmelCase =42`` and bindings target the throw-away local
    ``_A``; ``__call__`` repeats the parameter name ``_UpperCamelCase``
    (a ``SyntaxError``).
    """

    __UpperCAmelCase =42
    __UpperCAmelCase =42
    __UpperCAmelCase =42
    __UpperCAmelCase =0.0
    __UpperCAmelCase =1
    __UpperCAmelCase =1
    __UpperCAmelCase =True
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =jnp.floataa

    def UpperCamelCase ( self )-> str:
        """Build resnet/attention pairs sized for concatenated skip inputs."""
        _A = []
        _A = []
        for i in range(self.num_layers ):
            # Channel bookkeeping for the skip-connection concatenation.
            _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            _A = self.prev_output_channel if i == 0 else self.out_channels
            _A = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(_UpperCamelCase )
            _A = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(_UpperCamelCase )
        _A = resnets
        _A = attentions
        if self.add_upsample:
            _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True )-> Dict:
        """Concatenate each skip state, apply resnet+attention, then
        optionally upsample."""
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            _A = res_hidden_states_tuple[-1]
            _A = res_hidden_states_tuple[:-1]
            _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            _A = resnet(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
            _A = attn(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
        if self.add_upsample:
            _A = self.upsamplers_a(_UpperCamelCase )
        return hidden_states
class lowerCAmelCase_ ( nn.Module ):
    """Flax 2D up-block without attention: ResNet blocks over concatenated
    skip connections plus an optional upsampler.

    NOTE(review): garbled by obfuscation — field declarations collapsed to
    ``__UpperCAmelCase =42`` and bindings target the throw-away local
    ``_A``; ``__call__`` repeats the parameter name ``_UpperCamelCase``
    (a ``SyntaxError``).
    """

    __UpperCAmelCase =42
    __UpperCAmelCase =42
    __UpperCAmelCase =42
    __UpperCAmelCase =0.0
    __UpperCAmelCase =1
    __UpperCAmelCase =True
    __UpperCAmelCase =jnp.floataa

    def UpperCamelCase ( self )-> Optional[Any]:
        """Build the ResNet stack sized for concatenated skip inputs."""
        _A = []
        for i in range(self.num_layers ):
            # Channel bookkeeping for the skip-connection concatenation.
            _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            _A = self.prev_output_channel if i == 0 else self.out_channels
            _A = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(_UpperCamelCase )
        _A = resnets
        if self.add_upsample:
            _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True )-> Dict:
        """Concatenate each skip state, apply the ResNets, then optionally
        upsample."""
        for resnet in self.resnets:
            # pop res hidden states
            _A = res_hidden_states_tuple[-1]
            _A = res_hidden_states_tuple[:-1]
            _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            _A = resnet(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
        if self.add_upsample:
            _A = self.upsamplers_a(_UpperCamelCase )
        return hidden_states
class lowerCAmelCase_ ( nn.Module ):
    """Flax 2D UNet mid-block: resnet, then alternating (attention, resnet)
    pairs, all at constant channel width.

    NOTE(review): garbled by obfuscation — field declarations collapsed to
    ``__UpperCAmelCase =42``; the first resnet list and the attention list
    are both bound to ``_A`` so the later ``attentions.append`` /
    ``resnets.append`` reads target names never bound as written;
    ``__call__`` repeats the parameter name ``_UpperCamelCase`` (a
    ``SyntaxError``).
    """

    __UpperCAmelCase =42
    __UpperCAmelCase =0.0
    __UpperCAmelCase =1
    __UpperCAmelCase =1
    __UpperCAmelCase =False
    __UpperCAmelCase =False
    __UpperCAmelCase =jnp.floataa

    def UpperCamelCase ( self )-> Dict:
        """Build one leading resnet plus ``num_layers`` attention/resnet pairs."""
        # there is always at least one resnet
        _A = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        _A = []
        for _ in range(self.num_layers ):
            _A = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(_UpperCamelCase )
            _A = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(_UpperCamelCase )
        _A = resnets
        _A = attentions

    def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True )-> List[Any]:
        """Apply the leading resnet, then each attention/resnet pair."""
        _A = self.resnets[0](_UpperCamelCase , _UpperCamelCase )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            _A = attn(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
            _A = resnet(_UpperCamelCase , _UpperCamelCase , deterministic=_UpperCamelCase )
        return hidden_states
# ===== dataset row separator (end of source example) =====
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
UpperCamelCase__ = {'''allegro/herbert-base-cased''': 5_1_4}
UpperCamelCase__ = {}
class lowerCamelCase_ ( __a ):
    """Fast (tokenizers-backed) HerBERT tokenizer.

    NOTE(review): obfuscation broke this class — ``__init__`` repeats the
    parameter name ``_A`` (a ``SyntaxError``), and in the helpers below the
    ``[self.cls_token_id]`` / ``[self.sep_token_id]`` lists are bound to
    throw-away ``UpperCAmelCase__`` locals while the returns read the
    undefined names ``cls``/``sep`` (``token_ids_a`` and
    ``already_has_special_tokens`` are likewise never bound as written).
    """

    # Class-level resources consumed by the PreTrainedTokenizerFast machinery.
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = HerbertTokenizer

    def __init__( self : Optional[int] , _A : int=None , _A : List[str]=None , _A : Any=None , _A : Dict="<s>" , _A : List[Any]="<unk>" , _A : Dict="<pad>" , _A : Tuple="<mask>" , _A : str="</s>" , **_A : Dict , ):
        """Forward vocabulary files and special tokens to the fast-tokenizer
        base class."""
        super().__init__(
            _A , _A , tokenizer_file=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , sep_token=_A , **_A , )

    def lowercase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
        """Build model inputs as ``[CLS] seq_a [SEP]`` (plus ``seq_b [SEP]``
        for pairs)."""
        UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
        UpperCAmelCase__ : Dict = [self.sep_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
        if token_ids_a is None:
            return [1] + ([0] * len(_A )) + [1]
        return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]

    def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
        """Create token-type ids: 0 for the first segment, 1 for the second."""
        UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase__ : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase_ ( self : List[Any] , _A : str , _A : Optional[str] = None ):
        """Save the underlying tokenizer model files into the given directory."""
        UpperCAmelCase__ : Tuple = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )
# ===== dataset row separator (end of source example) =====
'''simple docstring'''
def a__ ( number: int , iterations: int ) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Returns the space-separated transcript, e.g.
    ``a__(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "``.

    :param number: starting integer, must be >= 1.
    :param iterations: last integer to play, must be >= 1.
    :raises ValueError: if either argument is not an ``int`` or is out of
        range as described above.

    Bugs fixed: the original declared the same (obfuscated) parameter name
    twice — a ``SyntaxError`` — and validated ``isinstance(x, x)``; the
    parameters are restored to the names the body already used.
    """
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            '''starting number must be
and integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Numbers divisible by neither 3 nor 5 are printed verbatim.
        if 0 not in (number % 3, number % 5):
            out += str(number )

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
# ===== dataset row separator (end of source example) =====
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCamelCase ( __lowercase , __lowercase ):
    """Wrapper that exposes a ``timm`` model as a Transformers backbone.

    Fixes applied: dataset-extraction residue (`` | 201 |``) fused onto the
    final ``return`` made the class a ``SyntaxError``, and every method
    repeated an obfuscated parameter name (also ``SyntaxError``); parameter
    names and the local/attribute bindings that obfuscation collapsed into
    the throw-away name ``SCREAMING_SNAKE_CASE`` are restored so every name
    that is read is actually bound.
    """

    # Name of the image input expected by ``forward``.
    UpperCamelCase_ : Optional[Any] = "pixel_values"
    # Gradient checkpointing is not supported through this wrapper.
    UpperCamelCase_ : int = False
    UpperCamelCase_ : Dict = TimmBackboneConfig

    def __init__( self , config , **kwargs ):
        """Validate *config* and build the underlying timm feature extractor."""
        requires_backends(self , '''timm''' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )

        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )

        if hasattr(config , '''out_features''' ) and config.out_features is not None:
            raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )

        pretrained = getattr(config , '''use_pretrained_backbone''' , None )
        if pretrained is None:
            raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , '''out_indices''' , None ) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )

    @classmethod
    def snake_case__ ( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        """Build a ``TimmBackboneConfig`` from the kwargs and defer to the
        base class's config-driven constructor."""
        requires_backends(cls , ['''vision''', '''timm'''] )
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('''config''' , TimmBackboneConfig() )
        use_timm = kwargs.pop('''use_timm_backbone''' , True )
        if not use_timm:
            raise ValueError('''use_timm_backbone must be True for timm backbones''' )

        num_channels = kwargs.pop('''num_channels''' , config.num_channels )
        features_only = kwargs.pop('''features_only''' , config.features_only )
        use_pretrained_backbone = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('''out_indices''' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )

    def snake_case__ ( self , module ):
        """Weight initialisation is handled by timm itself; nothing to do."""
        pass

    def snake_case__ ( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        """Run the timm backbone and package the selected feature maps
        (optionally with all hidden states) as a ``BackboneOutput``."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('''Cannot output attentions for timm backbones at the moment''' )

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None

        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
def __UpperCamelCase ( lowerCAmelCase__ : str ):
    """Return the harmonic series ``["1", "1/2", ..., "1/n"]``.

    :param lowerCAmelCase__: the last term ``n`` as a decimal string; an
        empty string yields an empty list.
    :return: list of term strings.

    Bug fixed: the empty-string guard tested the undefined name ``n_term``
    instead of the parameter, so every call raised ``NameError``.
    """
    if lowerCAmelCase__ == "":
        return []
    series: list = []
    for temp in range(int(lowerCAmelCase__ ) ):
        # First element is plain "1"; subsequent terms are "1/k".
        series.append(f"1/{temp + 1}" if series else '''1''' )
    return series
if __name__ == "__main__":
    # NOTE(review): the generator above was renamed ``__UpperCamelCase`` by
    # obfuscation, so the ``harmonic_series``/``nth_term`` names below are
    # undefined and this driver raises NameError as written.
    lowercase__ =input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
# ===== dataset row separator (end of source example) =====
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure: module name -> public symbols it provides.
UpperCAmelCase__ = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    # Bugs fixed: the original passed the undefined name ``_import_structure``
    # (the structure dict above was renamed ``UpperCAmelCase__`` by
    # obfuscation) and bound the lazy module to a throw-away name instead of
    # installing it in ``sys.modules``, so lazy importing never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCAmelCase__, module_spec=__spec__)
# ===== dataset row separator (end of source example) =====
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class __lowerCAmelCase :
        """Stand-in for ``PIL.Image`` so this module imports when Pillow is
        unavailable; vision tests are gated elsewhere.

        NOTE(review): the static method repeats the (obfuscated) name ``A``
        for both ``*args`` and ``**kwargs`` — a ``SyntaxError`` as written.
        """

        @staticmethod
        def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
            """No-op replacement for image-loading helpers."""
            pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for the zero-shot image-classification pipeline (PyTorch and
    TensorFlow, tiny random model plus slow real-checkpoint runs).

    NOTE(review): obfuscation garbled every method body — each result is
    bound to the throw-away local ``_UpperCAmelCase`` (presumably
    ``image_classifier = ...``, ``image = ...``, ``output = ...``), so the
    later reads of ``image_classifier``, ``A`` and the arguments to
    ``nested_simplify`` target names never bound as written.
    """

    @require_torch
    def _lowerCamelCase ( self : List[str]) -> Tuple:
        """Tiny random CLIP model: single image and small batch, PyTorch."""
        _UpperCAmelCase = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
        _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        _UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(A) , [
                [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
                [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
            ] , )

        _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
        self.assertEqual(
            nested_simplify(A) , [
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
            ] , )

    @require_tf
    def _lowerCamelCase ( self : str) -> Tuple:
        """Tiny random CLIP model: single image and small batch, TensorFlow."""
        _UpperCAmelCase = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
        _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        _UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
        self.assertEqual(
            nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )

        _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
        self.assertEqual(
            nested_simplify(A) , [
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
                [
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                    {'score': 0.3_3_3, 'label': ANY(A)},
                ],
            ] , )

    @slow
    @require_torch
    def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
        """Real CLIP checkpoint, PyTorch: expected scores on a COCO image."""
        _UpperCAmelCase = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
        # This is an image of 2 cats with remotes and no planes
        _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        _UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(A) , [
                {'score': 0.5_1_1, 'label': 'remote'},
                {'score': 0.4_8_5, 'label': 'cat'},
                {'score': 0.0_0_4, 'label': 'plane'},
            ] , )

        _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
        self.assertEqual(
            nested_simplify(A) , [
                [
                    {'score': 0.5_1_1, 'label': 'remote'},
                    {'score': 0.4_8_5, 'label': 'cat'},
                    {'score': 0.0_0_4, 'label': 'plane'},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def _lowerCamelCase ( self : List[str]) -> Any:
        """Real CLIP checkpoint, TensorFlow: expected scores on a COCO image."""
        _UpperCAmelCase = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
        # This is an image of 2 cats with remotes and no planes
        _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        _UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(A) , [
                {'score': 0.5_1_1, 'label': 'remote'},
                {'score': 0.4_8_5, 'label': 'cat'},
                {'score': 0.0_0_4, 'label': 'plane'},
            ] , )

        _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
        self.assertEqual(
            nested_simplify(A) , [
                [
                    {'score': 0.5_1_1, 'label': 'remote'},
                    {'score': 0.4_8_5, 'label': 'cat'},
                    {'score': 0.0_0_4, 'label': 'plane'},
                ],
            ]
            * 5 , )
| 639 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only bookkeeping keys from `state_dict`, in place.

    NOTE(review): restored the name `remove_ignore_keys_` used by the call site below
    (three functions here were all named `snake_case_`, shadowing each other), and fixed
    the pop call which passed the dict itself as both key and default.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # default of None keeps this a no-op for keys absent from the checkpoint
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` whose weight is tied to the embedding matrix.

    NOTE(review): the original unpacked both shape dims into the same throwaway name and
    passed the embedding module itself as every ``nn.Linear`` argument; restored the
    standard weight-tying helper.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Tie the tensor directly so lm_head shares storage with the embedding.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M100 checkpoint and return an HF ``MaMaaaForConditionalGeneration``.

    NOTE(review): restored the function name used by the ``__main__`` block and the local
    names (`mam_aaa`, `args`, `state_dict`, `model`) the body already referenced; the
    original bound everything to `lowercase__` and passed the state dict as the `strict`
    flag of ``load_state_dict``.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    # Newer fairseq checkpoints store the model config under cfg["model"].
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    # Share the decoder embedding as the model's shared embedding — presumably the
    # intent of the original (it loaded the tensor and discarded it); verify against
    # the upstream M2M100 conversion script.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # CLI entry point: convert a fairseq checkpoint and save it in HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # NOTE(review): the original bound parser/args to `__a` and read the garbled
    # attribute `args.fairseq_pathß` (stray 'ß').
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): both constants were bound to `__a` (second shadowed the first) while the
# test class below references SPIECE_UNDERLINE and a sample-vocab path; names restored.
SPIECE_UNDERLINE = "▁"  # sentencepiece word-boundary marker
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for BigBird (slow sentencepiece and fast tokenizers).

    NOTE(review): in the original, the mixin base was the undefined name `snake_case_`,
    all four class attributes were bound to the single name `a` (only the last survived),
    every method was named `__a` (mutual shadowing — unittest discovered no tests) and
    method bodies referenced the undefined placeholder `lowerCamelCase`.  Names restored
    to the conventional TokenizerTesterMixin contract.
    """

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Persist a sentencepiece-backed tokenizer so get_tokenizer() can reload it.
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` must round-trip through id 1."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        """Real pretrained tokenizer, downloaded once per test class."""
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence from the first ten vocab entries.
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(
            sequence, return_tensors="pt", return_token_type_ids=False
        )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # NOTE(review): padded tails of the expected batch are expressed via list
        # arithmetic ([0]*n / [1]*n); the values are identical to the original literal.
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66],  # noqa: E231
                [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66] + [0] * 63,  # noqa: E231
                [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66] + [0] * 84,  # noqa: E231
            ],
            "attention_mask": [
                [1] * 96,
                [1] * 33 + [0] * 63,
                [1] * 12 + [0] * 84,
            ],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# NOTE(review): both flags were bound to `__magic_name__` while the loaders below read
# MODEL_TYPE and LOAD_DENSE_INDEX; names restored.
MODEL_TYPE = "bart"          # which seq2seq answer model to load
LOAD_DENSE_INDEX = True      # whether to build the GPU dense retrieval index
@st.cache(allow_output_mutation=True)
def load_models():
    """Load retriever (RetriBERT) and answer-generation (BART/T5) models onto GPU.

    Returns (qar_tokenizer, qar_model, sas_tokenizer, sas_model); retriever pair is
    (None, None) when LOAD_DENSE_INDEX is off.

    NOTE(review): the decorator argument and every binding used the undefined
    obfuscation placeholders; names restored per the call site `load_models()`.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages + FAISS GPU index, and connect to ElasticSearch.

    Returns (wikiaab_passages, wikiaab_gpu_index_flat, es_client); the first two are
    None when LOAD_DENSE_INDEX is off.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        # Pre-computed 128-d passage representations, memory-mapped from disk.
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question embeddings.

    Returns (elia_train, eli5_train_q_index).
    """
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = eli5["train_eli5"]
    # Pre-computed 128-d question representations, memory-mapped from disk.
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
# Load heavyweight resources once at import time (the loaders are st.cache-decorated).
# NOTE(review): the original unpacked everything into `__magic_name__`, losing all of
# the globals that the retrieval helpers below read.
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples with the most similar questions.

    NOTE(review): the original signature repeated `UpperCamelCase_` (a SyntaxError) and
    indexed with an undefined name; restored per the call site `find_nearest_training(question)`.
    Argument order of embed_questions_for_retrieval assumed (questions, tokenizer, model)
    — TODO confirm against elia_utils.
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for `question` and build the seq2seq input document.

    Returns (question_doc, support_list) where support_list holds
    (article_title, section_title, score, passage_text) tuples.

    NOTE(review): the original signature repeated `UpperCamelCase_` four times (a
    SyntaxError); parameter names restored per the keyword call sites
    `make_support(question, source=..., method=..., n_results=...)`.
    """
    if source == "none":
        # Empty context: eleven empty passage slots joined by the separator token.
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate a long-form answer for `question_doc` with the seq2seq model.

    NOTE(review): the original signature repeated `UpperCamelCase_` nine times (a
    SyntaxError); parameters restored per the keyword call site. The return reads the
    module-level `support_list` set in the UI flow — presumably intentional upstream
    behavior; verify. qa_sas_generate argument order assumed
    (question_doc, model, tokenizer) — TODO confirm.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
# ---------------------------------------------------------------------------
# Streamlit UI flow.
# NOTE(review): every assignment below was bound to `__magic_name__` in the original
# while the conditions read `demo_options`, `action`, `wiki_source`, etc.; coherent
# names restored from those read sites.
# ---------------------------------------------------------------------------
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults; overridden by the sidebar sliders when enabled.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicating, then keep the top 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 248 |
"""simple docstring"""
def exchange_sort(numbers: list) -> list:
    """Sort `numbers` in ascending order in place (exchange sort, O(n^2)) and return it.

    NOTE(review): restored the name `exchange_sort` used by the ``__main__`` block; the
    original iterated ``range(<the list>)`` (TypeError) and referenced the undefined
    name `numbers`.
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            # Exchange whenever a later element is smaller than the pivot position.
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    # NOTE(review): the original bound both values to `__magic_name__` while reading
    # the undefined names `user_input` and `unsorted`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
'''simple docstring'''
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True if `color` can be assigned given the vertex's adjacency row.

    `neighbours` is one row of the adjacency matrix (1 = edge); the color is valid
    when no already-colored neighbour has the same color.

    NOTE(review): the original declared the parameter name `lowerCamelCase_` three
    times — a SyntaxError; names restored.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Backtracking helper: try to color `graph` from vertex `index` onward.

    Mutates `colored_vertices` in place; returns True when a full coloring with
    at most `max_colors` colors exists from this point.

    NOTE(review): restored the parameter names (the original repeated
    `lowerCamelCase_` four times — a SyntaxError — and called an undefined name).
    """
    # Base Case: every vertex colored.
    if index == len(graph):
        return True
    # Recursive Step: try each color for the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices.
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list, max_colors: int) -> list:
    """Graph-coloring entry point: return a list assigning each vertex a color in
    [0, max_colors), or an empty list when no such coloring exists.

    NOTE(review): restored distinct names — all three functions in this section were
    named `UpperCAmelCase`, shadowing one another.
    """
    colored_vertices = [-1] * len(graph)  # -1 marks "uncolored"
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Set the weight (and optionally bias) of `torch_layer`, asserting shapes match.

    NOTE(review): restored the name `set_param` used by every call site below; the
    original repeated the parameter name `lowerCamelCase_` (a SyntaxError).
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH-attention weights (query_key, value, output dense) into `torch_layer`.

    NOTE(review): name and parameters restored per the call site in
    set_block_weights_in_torch; the original bound everything to throwaway names.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local-attention weights (query, key, value, output dense) into `torch_layer`.

    NOTE(review): name and parameters restored per the call site in
    set_block_weights_in_torch.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax Reformer block's weights into the matching torch block.

    NOTE(review): name and parameters restored per the call site in
    set_model_weights_in_torch; local bindings restored from their read sites.
    """
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0])
    layer_norm_a_bias = np.asarray(layer_norm_a[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_a_weight), torch.tensor(layer_norm_a_bias)
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention stores 3 weight groups, local attention stores 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_a_weight), torch.tensor(layer_norm_a_bias)
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a full trax Reformer weight tree into `torch_model` (ReformerModelWithLMHead).

    NOTE(review): name and parameters restored per the call site in
    convert_trax_checkpoint_to_pytorch. The `isinstance` target was the undefined
    placeholder `lowerCamelCase_` — assumed `tuple` (axial position embeddings are
    stored as a tuple of factors); TODO confirm against the upstream converter.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings)
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # Each torch layer consumes 4 consecutive trax weight groups.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    out_norm_weight = np.asarray(weights[7][0])
    out_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(out_norm_weight), torch.tensor(out_norm_bias)
    )
    # output embeddings
    out_embed_weight = np.asarray(weights[9][0])
    out_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(out_embed_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    '''Convert a pickled trax Reformer checkpoint into a PyTorch state dict.

    Args:
        trax_model_pkl_path: path to the trax checkpoint pickle file.
        config_file: path to the ``ReformerConfig`` JSON that describes the
            architecture of the checkpoint being converted.
        pytorch_dump_path: output path for the converted PyTorch weights.
    '''
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = ReformerModelWithLMHead(config)

    # NOTE(review): pickle.load can execute arbitrary code — only run this
    # on checkpoints from a trusted source.
    with open(trax_model_pkl_path, """rb""") as f:
        model_weights = pickle.load(f)["""weights"""]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point: parse the three required paths and run the
    # conversion. (The original assigned the parser/args to a throwaway name
    # while referencing `parser`/`args`, which raised NameError.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import argparse
import os
import re
import packaging.version
snake_case__ : Optional[Any] = 'examples/'
snake_case__ : List[str] = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
snake_case__ : Dict = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
snake_case__ : Any = 'README.md'
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->str:
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase =f.read()
_UpperCAmelCase =REPLACE_PATTERNS[pattern]
_UpperCAmelCase =replace.replace("VERSION" , __lowerCAmelCase )
_UpperCAmelCase =re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(__lowerCAmelCase )
def lowerCamelCase__ ( _lowerCamelCase ) ->Union[str, Any]:
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="examples" )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ) ->Tuple:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def lowerCamelCase__ ( ) ->List[str]:
_UpperCAmelCase ="🤗 Transformers currently provides the following architectures"
_UpperCAmelCase ="1. Want to contribute a new model?"
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase =f.readlines()
# Find the start of the list.
_UpperCAmelCase =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
_UpperCAmelCase =lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(__lowerCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(__lowerCAmelCase )
def lowerCamelCase__ ( ) ->Union[str, Any]:
with open(REPLACE_FILES["init"] , "r" ) as f:
_UpperCAmelCase =f.read()
_UpperCAmelCase =REPLACE_PATTERNS["init"][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def lowerCamelCase__ ( _lowerCamelCase=False ) ->Dict:
_UpperCAmelCase =get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
_UpperCAmelCase =default_version.base_version
elif patch:
_UpperCAmelCase =F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
_UpperCAmelCase =F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
_UpperCAmelCase =input(F"Which version are you releasing? [{default_version}]" )
if len(__lowerCAmelCase ) == 0:
_UpperCAmelCase =default_version
print(F"Updating version to {version}." )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ) ->List[Any]:
_UpperCAmelCase =get_version()
_UpperCAmelCase =F"{current_version.major}.{current_version.minor + 1}.0.dev0"
_UpperCAmelCase =current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase =input(F"Which version are we developing now? [{dev_version}]" )
if len(__lowerCAmelCase ) == 0:
_UpperCAmelCase =dev_version
print(F"Updating version to {version}." )
global_version_update(__lowerCAmelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
snake_case__ : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 718 |
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ ,__UpperCAmelCase ,)
class A (__UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = RobertaConfig
_SCREAMING_SNAKE_CASE = """roberta"""
def __init__( self , lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(lowercase_ )
_snake_case : str = RobertaEmbeddings(lowercase_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ ,__UpperCAmelCase ,)
class A (__UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = RobertaConfig
_SCREAMING_SNAKE_CASE = """roberta"""
def __init__( self , lowercase_ ) -> str:
'''simple docstring'''
super().__init__(lowercase_ )
_snake_case : List[str] = config.num_labels
_snake_case : List[str] = config.num_hidden_layers
_snake_case : Dict = DeeRobertaModel(lowercase_ )
_snake_case : Dict = nn.Dropout(config.hidden_dropout_prob )
_snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(lowercase_ )
def __a ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=-1 , lowercase_=False , ) -> Optional[Any]:
'''simple docstring'''
_snake_case : Dict = self.num_layers
try:
_snake_case : Any = self.roberta(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , )
_snake_case : List[Any] = outputs[1]
_snake_case : Optional[Any] = self.dropout(lowercase_ )
_snake_case : Any = self.classifier(lowercase_ )
_snake_case : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_snake_case : Dict = e.message
_snake_case : Any = e.exit_layer
_snake_case : Tuple = outputs[0]
if not self.training:
_snake_case : Tuple = entropy(lowercase_ )
_snake_case : str = []
_snake_case : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_snake_case : List[str] = MSELoss()
_snake_case : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_snake_case : Tuple = CrossEntropyLoss()
_snake_case : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_snake_case : int = []
for highway_exit in outputs[-1]:
_snake_case : List[str] = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_snake_case : Optional[Any] = MSELoss()
_snake_case : List[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_snake_case : Optional[Any] = CrossEntropyLoss()
_snake_case : str = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase_ )
if train_highway:
_snake_case : Tuple = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_snake_case : List[str] = (loss,) + outputs
if not self.training:
_snake_case : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_snake_case : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 326 |
import math
import flax.linen as nn
import jax.numpy as jnp
def A_ ( lowercase_ , lowercase_ , lowercase_ = 1 , lowercase_ = 1 , lowercase_ = 1.0E4 , lowercase_ = False , lowercase_ = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
_snake_case : Union[str, Any] = float(embedding_dim // 2 )
_snake_case : Optional[int] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_snake_case : Union[str, Any] = min_timescale * jnp.exp(jnp.arange(lowercase_ , dtype=jnp.floataa ) * -log_timescale_increment )
_snake_case : Any = jnp.expand_dims(lowercase_ , 1 ) * jnp.expand_dims(lowercase_ , 0 )
# scale embeddings
_snake_case : Any = scale * emb
if flip_sin_to_cos:
_snake_case : str = jnp.concatenate([jnp.cos(lowercase_ ), jnp.sin(lowercase_ )] , axis=1 )
else:
_snake_case : Optional[int] = jnp.concatenate([jnp.sin(lowercase_ ), jnp.cos(lowercase_ )] , axis=1 )
_snake_case : Optional[Any] = jnp.reshape(lowercase_ , [jnp.shape(lowercase_ )[0], embedding_dim] )
return signal
class A (nn.Module ):
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = jnp.floataa
@nn.compact
def __call__( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
_snake_case : List[str] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowercase_ )
_snake_case : List[Any] = nn.silu(lowercase_ )
_snake_case : List[str] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(lowercase_ )
return temb
class A (nn.Module ):
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = 1
@nn.compact
def __call__( self , lowercase_ ) -> Any:
'''simple docstring'''
return get_sinusoidal_embeddings(
lowercase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 326 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = "▁"
UpperCamelCase = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
UpperCamelCase = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
UpperCamelCase = {"vinai/bartpho-syllable": 1_024}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Dict = VOCAB_FILES_NAMES
_snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :List[Any]="<s>" , lowerCamelCase__ :List[str]="</s>" , lowerCamelCase__ :Dict="</s>" , lowerCamelCase__ :List[str]="<s>" , lowerCamelCase__ :int="<unk>" , lowerCamelCase__ :Optional[int]="<pad>" , lowerCamelCase__ :List[Any]="<mask>" , lowerCamelCase__ :Optional[Dict[str, Any]] = None , **lowerCamelCase__ :Union[str, Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ :Dict = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
UpperCamelCase__ :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
UpperCamelCase__ :List[Any] = vocab_file
UpperCamelCase__ :Dict = monolingual_vocab_file
UpperCamelCase__ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCamelCase__ :List[Any] = {}
UpperCamelCase__ :Tuple = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCAmelCase__ ) not in self.fairseq_tokens_to_ids:
UpperCamelCase__ :List[Any] = cnt
cnt += 1
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
UpperCamelCase__ :Dict = line.strip().split()[0]
UpperCamelCase__ :Optional[Any] = len(self.fairseq_tokens_to_ids )
if str(UpperCAmelCase__ ) not in self.fairseq_tokens_to_ids:
UpperCamelCase__ :str = len(self.fairseq_tokens_to_ids )
UpperCamelCase__ :Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :Optional[int] ):
UpperCamelCase__ :List[Any] = self.__dict__.copy()
UpperCamelCase__ :str = None
UpperCamelCase__ :List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :List[str] , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase__ :int = {}
UpperCamelCase__ :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self :Tuple , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ :str = [self.cls_token_id]
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self :Optional[int] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None , lowerCamelCase__ :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) + [1]
def __a ( self :Dict , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :int = [self.sep_token_id]
UpperCamelCase__ :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self :Dict ):
return len(self.fairseq_ids_to_tokens )
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self :Dict , lowerCamelCase__ :str ):
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def __a ( self :Any , lowerCamelCase__ :str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __a ( self :Optional[int] , lowerCamelCase__ :Tuple ):
return self.fairseq_ids_to_tokens[index]
def __a ( self :Optional[Any] , lowerCamelCase__ :int ):
UpperCamelCase__ :List[str] = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip()
return out_string
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase__ :List[str] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase__ :Union[str, Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , """wb""" ) as fi:
UpperCamelCase__ :Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCAmelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(UpperCAmelCase__ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file | 721 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Any , *lowerCamelCase__ :Union[str, Any] , **lowerCamelCase__ :int ):
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) | 383 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a ( snake_case__ ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : Any , __snake_case : Tuple=13 , __snake_case : str=7 , __snake_case : List[Any]=True , __snake_case : Dict=True , __snake_case : Tuple=False , __snake_case : Dict=True , __snake_case : Any=99 , __snake_case : List[Any]=32 , __snake_case : List[Any]=5 , __snake_case : int=4 , __snake_case : Optional[int]=64 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : int=5_12 , __snake_case : Union[str, Any]=16 , __snake_case : Optional[int]=2 , __snake_case : Tuple=0.02 , __snake_case : List[Any]=3 , __snake_case : Tuple=4 , __snake_case : int=None , __snake_case : Union[str, Any]=2 , __snake_case : Tuple=2 , __snake_case : Any=2 , __snake_case : int=2 , __snake_case : List[Any]=4 , __snake_case : Union[str, Any]=1 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = q_groups
UpperCAmelCase_ = k_groups
UpperCAmelCase_ = v_groups
UpperCAmelCase_ = post_attention_groups
UpperCAmelCase_ = intermediate_groups
UpperCAmelCase_ = output_groups
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Dict ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCamelCase_ ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : int , __snake_case : List[str] , __snake_case : Dict ):
UpperCAmelCase_ = SqueezeBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase_ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict ):
UpperCAmelCase_ = SqueezeBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Union[str, Any] ):
UpperCAmelCase_ = SqueezeBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Dict , __snake_case : str , __snake_case : int , __snake_case : Dict , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Optional[int] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SqueezeBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Any , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SqueezeBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any , __snake_case : List[Any] , __snake_case : List[str] ):
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = SqueezeBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
(UpperCAmelCase_) = config_and_inputs
UpperCAmelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase : Dict = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Tuple = False
lowerCAmelCase : List[Any] = True
lowerCAmelCase : Optional[Any] = False
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = SqueezeBertModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCamelCase_ , dim=37 )
def lowerCamelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Dict ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SqueezeBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class a ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
UpperCAmelCase_ = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
UpperCAmelCase_ = model(lowerCamelCase_ )[0]
UpperCAmelCase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCamelCase_ )
UpperCAmelCase_ = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-4 ) )
| 144 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class a ( PretrainedConfig ):
    """
    Configuration for a SEW (Squeezed and Efficient Wav2Vec) speech model.

    Stores the convolutional feature-extractor, transformer encoder, SpecAugment,
    CTC-loss and sequence-classification hyper-parameters. Serialization and
    ``from_pretrained`` behaviour come from ``PretrainedConfig`` (the mangled base
    name did not resolve; ``PretrainedConfig`` is what this module imports).
    """

    # Key under which AutoConfig registers this configuration.
    model_type = "sew"

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        # Fix: the original signature repeated one parameter name (a SyntaxError)
        # and assigned every value to a throwaway local instead of `self`,
        # leaving the attributes read below (e.g. `self.conv_dim`) unset.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs describe the same stack layer-by-layer,
        # so they must all have the same length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def __UpperCamelCase(self):
        """Overall stride of the conv front end: product of all conv strides."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 120 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE_:
    """Synthetic regression dataset: y = a * x + b plus N(0, 0.1) noise."""

    def __init__(self, a=2, b=3, length=64, seed=None):
        # Fix: the original signature repeated one parameter name four times
        # (a SyntaxError) and bound results to locals instead of `self`,
        # leaving `self.length`/`self.x`/`self.y` (read below) unset.
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Fix: the index parameter was mangled while the body read `i`.
        return {"x": self.x[i], "y": self.y[i]}
class SCREAMING_SNAKE_CASE_(torch.nn.Module):
    """Tiny linear model with fixed [2, 3] parameters; logs dtypes on the first batch."""

    def __init__(self, a=0, b=0, double_output=False):
        # Fix: the original signature repeated one parameter name (a SyntaxError)
        # and bound the parameters to locals instead of `self.a`/`self.b`,
        # which `forward` reads. `a`/`b`/`double_output` are intentionally
        # unused here (the weights are the fixed [2, 3] vectors).
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        # Fix: nn.Module dispatches __call__ to `forward`; the mangled method
        # name broke `module(x)`. Also persist the first-batch flag on `self`.
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class SCREAMING_SNAKE_CASE_(torch.nn.Module):
    """Tiny scalar linear model y = a * x + b; logs dtypes on the first batch."""

    def __init__(self, a=0, b=0, double_output=False):
        # Fix: the original signature repeated one parameter name (a SyntaxError)
        # and bound the parameters to locals instead of `self.a`/`self.b`,
        # which `forward` reads. `double_output` is kept for signature
        # compatibility but unused in this body.
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        # Fix: nn.Module dispatches __call__ to `forward`; the mangled method
        # name broke `module(x)`. Also persist the first-batch flag on `self`.
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def A__(accelerator, batch_size=16):
    """Build MRPC train/validation DataLoaders tokenized with bert-base-cased.

    Fixes: the original signature repeated one parameter name (a SyntaxError);
    inner-function parameters were mangled while their bodies read `examples`;
    the tokenizer outputs' `labels` entry was assigned to a throwaway local.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    # NOTE(review): batch sizes 2/1 are hard-coded as in the original body, so
    # the `batch_size` parameter is currently unused — confirm intent.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 150 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Content and (extension-less) name of the fixture file used throughout this
# module. Fix: both constants were bound to the same name (`lowercase`), so the
# first value was immediately lost and the `FILE_PATH`/`FILE_CONTENT` names
# this module actually reads were never defined.
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session fixture: a zstd-compressed file containing FILE_CONTENT.

    Fix: the fixture name must be `zstd_path` so the parametrized tests below
    can request it, and the bytes written must be the module's FILE_CONTENT
    (the original encoded the tmp_path_factory object itself).
    """
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """Write FILE_CONTENT into the mock `tmp://` filesystem and return its name.

    Fix: the fixture name must be `tmpfs_file` so tests can request it; the
    original joined the path with the fixture argument itself and wrote that
    argument instead of FILE_PATH / FILE_CONTENT.
    """
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """Extracting a compressed file through cached_path recovers the original text.

    Fix: the original repeated one parameter name six times (a SyntaxError);
    the body already reads `gz_file`/`xz_file`/`zstd_path`/`compression_format`,
    which fixes the fixture names the signature must request.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction lands under the default or the monkeypatched custom directory.

    Fix: the original repeated one parameter name (a SyntaxError); the body
    already reads `default_extracted`/`xz_file`/`tmp_path`/`monkeypatch`,
    which fixes the parametrize/fixture names the signature must declare.
    """
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path is the identity for local files (absolute and relative paths).

    Fix: the parameter was mangled while the body already reads `text_file`,
    which is also the fixture name pytest must inject.
    """
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    """cached_path raises FileNotFoundError for missing local files.

    Fix: parameter name restored to the `tmp_path` fixture the body reads;
    the mangled `pytest.raises` argument is the exception cached_path raises
    for a nonexistent local path.
    """
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    """A tmp:// fsspec URL resolves through get_from_cache to the written content.

    Fix: parameter name restored to the `tmpfs_file` fixture the body reads.
    """
    output_file = get_from_cache(f"""tmp://{tmpfs_file}""")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    """With offline mode enabled, remote cached_path raises OfflineModeIsEnabled.

    Fix: the patch value and the expected exception were mangled to an
    undefined name; OfflineModeIsEnabled is imported at the top of this module.
    """
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    """With offline mode enabled, http_get/http_head raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    """With offline mode enabled, ftp_get/ftp_head raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    """With offline mode enabled, fsspec_get/fsspec_head raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 150 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the PLBart sub-package. Fix: the tokenizer/model name
# lists were rebinding the same variable (destroying the dict), and the final
# _LazyModule call read an undefined `_import_structure`; populate the dict
# under per-module keys instead.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite `name.N` segments of a PyTorch key as `name_N` (Flax style).

    Fixes: the regex local was immediately clobbered so `re.findall` was called
    as (key, key); replacements were dropped into a throwaway local instead of
    rebinding `key`. The name `rename_key` is what the conversion function
    below calls.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Map a PyTorch (tuple) key/tensor pair onto its Flax name and layout.

    Fixes: the original signature repeated one parameter name three times (a
    SyntaxError) while the body already read `pt_tuple_key`/`pt_tensor`/
    `random_flax_state_dict`; the transposed/renamed results were bound to a
    throwaway local. The name is what the conversion function below calls.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    Fixes: the original signature repeated one parameter name (a SyntaxError);
    intermediate results (flattened random params, the output dict, the renamed
    key, the converted tensor) were all bound to the same throwaway local, so
    the helpers above were effectively never used and nothing accumulated.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 699 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Fix: the repo-root path and the internal-op list were bound to the same name,
# losing the first value; the checker below reads `INTERNAL_OPS` explicitly.
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op in a TF SavedModel is ONNX-exportable at `opset`.

    Fixes: the original signature repeated one parameter name (a SyntaxError);
    `json.load` was handed the wrong object instead of the open file; the
    opset lookup/append/len calls used an undefined placeholder; the strict
    branch concatenated a str with a list (TypeError). The name matches the
    `onnx_compliancy(...)` call in the __main__ guard.
    """
    saved_model = SavedModel()
    onnx_ops = []

    # Collect the ONNX-supported op names for every opset up to the requested one.
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    # Fix: parse_args() was bound to a mangled name while the code below reads `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 701 |
from ...utils import is_torch_available, is_transformers_available

# Expose the VQ-Diffusion pipeline only when both `transformers` and PyTorch
# are importable — the pipeline module imports from both at load time.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 181 | 0 |
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if `matrix` equals its own conjugate transpose.

    Fix: the parameter was mangled while the body read `matrix`; the name
    `is_hermitian` is what `tests()` and the quotient checks call.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix `a` and vector `v`.

    Fix: the original signature repeated one parameter name (a SyntaxError);
    the name `rayleigh_quotient` is what `tests()` calls.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """Ad-hoc numeric checks for is_hermitian / rayleigh_quotient.

    Fix: the matrices/vector were bound to a single throwaway local while the
    assertions read other names; the name `tests` is what the __main__ guard calls.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    # Run the module doctests first, then the ad-hoc numeric checks.
    import doctest

    doctest.testmod()
    # NOTE(review): `tests` must resolve to the check routine defined above
    # (bound under a mangled name in this file) — confirm it is importable.
    tests()
| 41 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Make 🤗 Transformers' own logging verbose and route it through an explicit,
# consistently formatted default handler for this training script.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV splits, tokenize them, and build tf.data datasets plus a label map.

    Fixes: the original signature repeated one parameter name (a SyntaxError)
    — the restored names match the keyword arguments of the `get_tfds(...)`
    call in `main`; per-split file lists and mapped datasets were bound to a
    throwaway local instead of the `files`/`transformed_ds` dicts the body reads.

    Returns:
        (train_ds, val_ds, test_ds, labelaid) where each dataset may be None
        when the corresponding file was not provided.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # Single-sentence vs sentence-pair tokenization.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, labelaid
# Fix: `main()` below logs through the name `logger`, which the mangled
# assignment never defined.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """CLI arguments describing the input CSV files and tokenization limits.

    Fixes: every field was bound to the same mangled name (so only one field
    survived) and defaults referenced an undefined name; the field names match
    the `data_args.*` reads in `main` and the class name matches the
    `HfArgumentParser((ModelArguments, DataTrainingArguments, ...))` call.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=1_28,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """CLI arguments selecting the pretrained model/config/tokenizer.

    Fixes: every field was bound to the same mangled name and defaults
    referenced an undefined name; the field names match the `model_args.*`
    reads in `main` and the class name matches the HfArgumentParser call.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    """Entry point: fine-tune / evaluate a TF sequence-classification model.

    Fixes: the three parsed dataclasses and several results were all bound to
    single throwaway locals while the body reads `training_args`, `results`,
    `result`, etc.; the name `main` matches the call in the __main__ guard.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
    # NOTE(review): `main` must resolve to the training entry point defined
    # above (bound under a mangled name in this file) — confirm it resolves.
    main()
| 699 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the NLLB sub-package. Fix: the tokenizer name lists
# were rebinding a single variable, and the final _LazyModule call read an
# undefined `_import_structure`; populate the dict under per-module keys.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LayoutLM, driven by the shared TokenizerTesterMixin.

    Fixes: the first base class was an undefined mangled name —
    `TokenizerTesterMixin` is what this module imports; the mixin's required
    class attributes were all bound to one mangled name; every method shared
    the name `A_` (shadowing each other, and hiding `setUp`/`get_tokenizer`
    from unittest and the mixin); `vocab_tokens`/`self.vocab_file` were read
    but never bound.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Placeholder kept from the original file (intentionally empty)."""
        pass
| 109 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq -> HF parameter-name mapping; "*" is replaced by the layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top of the HF model rather than under a sub-module.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by the
    dotted path ``key`` (optionally into its ``weight``/``bias``/... slot).

    ``full_name`` is the original fairseq name, used only for messages.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Walk the fairseq state dict and copy every tensor into ``hf_model``."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # e.g. "encoder.layers.3.fc1.weight" -> layer index "3"
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into ``feature_extractor``.

    ``type_id`` 0 is the conv weight/bias; ``type_id`` 2 is the layer norm
    (only present on layer 0 when group norm is used).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load a fairseq WavLM checkpoint and re-save it as a HF ``WavLMModel``."""
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 364 |
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
if num <= 0:
lowerCamelCase_ : Optional[int] = F"{num}: Invalid input, please enter a positive integer."
raise ValueError(lowerCAmelCase__ )
lowerCamelCase_ : str = [True] * (num + 1)
lowerCamelCase_ : List[str] = []
lowerCamelCase_ : Optional[int] = 2
lowerCamelCase_ : List[str] = int(math.sqrt(lowerCAmelCase__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCAmelCase__ )
# Set multiples of start be False
for i in range(start * start ,num + 1 ,lowerCAmelCase__ ):
if sieve[i] is True:
lowerCamelCase_ : Tuple = False
start += 1
for j in range(end + 1 ,num + 1 ):
if sieve[j] is True:
prime.append(lowerCAmelCase__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 364 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node: an integer payload plus left/right child links.

    Renamed from ``A``: the module defined a second class with the same name,
    which shadowed this one and made it unreachable.
    """

    def __init__(self, value):
        self.value = value
        # Children are attached by the caller after construction.
        self.left = None
        self.right = None
class BinaryTree:
    """Sums a binary tree of ``value``-bearing nodes via depth-first search.

    Renamed from ``A`` to resolve the name collision with the node class
    defined above in this module.
    """

    def __init__(self, tree):
        # ``tree`` is the root node (or None for an empty tree).
        self.tree = tree

    def depth_first_search(self, node):
        """Return the sum of ``node.value`` over the subtree rooted at ``node``."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self):
        # Iterating the tree yields a single item: the total of all values.
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 702 |
def prime_sieve_eratosthenes(num):
    """Return the primes <= ``num`` via the sieve of Eratosthenes.

    Raises:
        ValueError: if ``num`` is not a positive integer.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Cross off multiples of the prime p, starting at p*p.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 247 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    """Deprecated alias for :class:`ImageGPTImageProcessor`.

    Kept only so existing imports keep working; the class name is restored
    from its own deprecation message.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning so downstream code migrates before v5.
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 43 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
# NOTE(review): class, attribute and method names below are restored to the
# UNetBlockTesterMixin contract (block_class / block_type / dummy_input /
# prepare_init_args_and_inputs_for_common / test_output); the obfuscated
# source gave every class the same name, so only one survived discovery.
class DownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
# NOTE(review): names restored to the UNetBlockTesterMixin contract; the
# obfuscated source reused one class name for every test class.
class UNetMidBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlockaDCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlockaDSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
# NOTE(review): names restored to the UNetBlockTesterMixin contract; the
# obfuscated source reused one class name for every test class.
class UpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Up blocks additionally consume the down-path residual states.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Decoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 43 | 1 |
# Content injected as the first cell of every auto-generated doc notebook.
# Name restored: the list below referenced INSTALL_CONTENT, which the
# obfuscated source never defined (every constant was assigned to `a_`).
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization and sentencepiece tokenization (XLNet-style).

    NOTE(review): the class, its base, the module constants and every method
    name were collapsed by obfuscation (all methods were `_a`, all `__init__`
    parameters shared one name); names are restored from the tokenizer API
    this class implements and the error message that names CpmTokenizer.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # XLNet convention: padding positions get token-type id 3.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        # Map space/newline to the placeholder chars that _decode reverses.
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # sentencepiece processors are not picklable; drop and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace, quotes, accents and case before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize with sentencepiece, splitting digits off trailing commas."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet format: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids for sequence pairs; the trailing <cls> gets id 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or re-serialize) the sentencepiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        # Undo the jieba-time whitespace placeholders (see self.translator).
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
"""simple docstring"""
from copy import deepcopy
class a:
    """Fenwick (binary indexed) tree over a 0-indexed integer array.

    ``tree[0]`` stores the first array element directly; indices >= 1 use the
    classic BIT parent layout, so all single operations run in O(log n).
    Method names are restored from the internal call sites (``self.init``,
    ``self.next_``, ``self.prev``, ``self.add``, ``self.get``, ``self.prefix``,
    ``self.query``); the obfuscated source gave every method the same name.
    """

    def __init__(self, arr=None, size=None):
        """Build from an existing array, or as an all-zero tree of ``size``."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """(Re)build the tree from ``arr`` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Return the underlying array (inverse of :meth:`init`), O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next node whose range covers ``index`` (add the lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous node when walking a prefix (strip the lowest set bit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add ``value`` to element ``index``, O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set element ``index`` to ``value``, O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Sum of ``arr[0:right]`` (``right`` exclusive), O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Sum of the half-open interval ``arr[left:right]``, O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Value of element ``index``, O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Binary-lifting search over prefix sums; returns -1 if value < arr[0]."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder that raises a helpful error when torch/scipy are missing.

    NOTE(review): the two classmethods follow the transformers dummy-object
    template (``from_config`` / ``from_pretrained``) — the obfuscated source
    gave both the same name, so one shadowed the other; confirm against the
    original module. ``_backends`` is the attribute the DummyObject metaclass
    reads to report which extras to install.
    """

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 103 | 0 |
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = set()
# edges = list of graph's edges
__lowerCAmelCase = get_edges(_lowerCAmelCase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
__lowerCAmelCase , __lowerCAmelCase = edges.pop()
chosen_vertices.add(_lowerCAmelCase )
chosen_vertices.add(_lowerCAmelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(_lowerCAmelCase )
return chosen_vertices
def get_edges(_lowerCAmelCase):
    """Return the set of directed ``(from_node, to_node)`` pairs appearing in
    the adjacency-list graph passed as ``_lowerCAmelCase``.

    The obfuscated source bound this function to ``lowercase`` while the
    vertex-cover routine above calls it as ``get_edges``; the canonical name
    is restored and the old binding preserved below.
    """
    graph = _lowerCAmelCase
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


# Backward-compatible alias: preserves the module-level binding the obfuscated
# source left behind (it shadowed the earlier function of the same name).
lowercase = get_edges
# Run this module's doctests when executed directly; the commented lines show
# example usage of the vertex-cover routine.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 706 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``MgpstrTokenizer`` (MGP-STR scene-text model).

    The obfuscated original inherited from an undefined ``A__``; the mixin
    imported at the top of this file is the intended base. Class attributes
    and method names are restored to the ones ``TokenizerTesterMixin`` and
    ``unittest`` require (``setUp``, ``get_tokenizer``, ``test_*``).
    """

    # NOTE(review): these four attributes were all bound to one obfuscated
    # name; the names below are the ones the tester mixin reads.
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        """Write a small character-level vocabulary for the tests to load."""
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        """Instantiate a tokenizer from the vocabulary written in ``setUp``."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected output) pair for round-trip tests."""
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        """Adding a special token yields one id and it is hidden on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        """tokenize/convert/encode/decode agree and round-trip the text."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_pretokenized_inputs(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_maximum_encoding_length_pair_input(self):
        pass
| 573 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# NOTE(review): the same module-level name is bound twice here — first to the
# logger, then to the pretrained-config archive map — so the logger binding is
# immediately clobbered. Upstream these were two distinct names (``logger``
# and an archive-map constant); left byte-identical to avoid breaking any
# unseen references to the obfuscated name.
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
    '''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase_(PretrainedConfig):
    """Configuration class for the M-CTC-T speech model.

    The obfuscated original inherited from an undefined name; the intended
    base is ``PretrainedConfig`` (imported at the top of this file). The
    ``__init__`` parameter names, which the obfuscation collapsed into one
    (a SyntaxError), are restored from the attribute-assignment order.
    """

    # Read by PretrainedConfig machinery to identify the model family.
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # NOTE(review): this name is rebound twice below (to the logger, then to a
    # Hub repo id), so this ``False`` fallback is always clobbered. Upstream
    # these were three distinct names; left byte-identical to avoid breaking
    # unseen references.
    lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
# Hub repository that hosts the fallback Arial font used by the text renderer.
lowerCAmelCase__ = '''ybelkada/fonts'''
def _check_torch_version():
    """Raise ``ImportError`` when torch is installed but older than 1.11.0.

    Name restored from the call sites in this file (the obfuscation renamed
    every module function to the same identifier).
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            '''Pix2StructImageProcessor. Please upgrade torch.'''
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping ``patch_height x patch_width`` patches from a
    (channels, height, width) tensor.

    Returns a tensor of shape
    ``[1, rows, columns, patch_height * patch_width * channels]``. Parameter
    names restored: the obfuscated signature repeated one name three times
    (a SyntaxError) while the body read the original names.
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render ``text`` onto a new PIL image, wrapped at 80 characters.

    Font resolution order: in-memory ``font_bytes``, then ``font_path``, then
    a fallback Arial font downloaded from the Hub. Parameter names restored —
    the obfuscated signature repeated a single name (a SyntaxError).
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # ``lowerCAmelCase__`` is this module's Hub repo id for the fallback font.
        font = hf_hub_download(lowerCAmelCase__, '''Arial.TTF''')
    font = ImageFont.truetype(font, encoding='''UTF-8''', size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header: str, **kwargs):
    """Render ``header`` text above ``image`` and return the stacked result as
    a numpy array.

    Both parts are resized to a common width (the wider of the two). Extra
    keyword arguments are forwarded to ``render_text``. Parameter names
    restored from the body's original references.
    """
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new('''RGB''', (new_width, new_height + new_header_height), '''white''')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class lowercase_(BaseImageProcessor):
    """Pix2Struct image processor: optionally renders VQA header text onto the
    image, normalizes it, and flattens it into fixed-size patches.

    The obfuscated original inherited from an undefined name; the intended
    base is ``BaseImageProcessor`` (imported at the top of this file). Method
    names are restored from the internal call sites
    (``self.extract_flattened_patches`` / ``self.normalize``), and parameter
    names from the bodies — the obfuscated signatures repeated one name per
    method, which is a SyntaxError.
    """

    # NOTE(review): attribute name per transformers convention (the base class
    # reads ``model_input_names``); the obfuscated source used a junk name.
    model_input_names = ['''flattened_patches''']

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize ``image`` so at most ``max_patches`` patches fit, split it
        into patches, prepend 1-based row/col ids, and zero-pad to
        ``max_patches`` rows.
        """
        requires_backends(self.extract_flattened_patches, '''torch''')
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size['''height'''], patch_size['''width''']
        image_height, image_width = get_image_size(image)
        # maximize scale s.t. rows * cols <= max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        # NOTE(review): align_corners/antialias values restored per upstream
        # Pix2Struct — the obfuscation replaced both literals.
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode='''bilinear''',
            align_corners=False,
            antialias=True,
        ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Per-image standardization: subtract the global mean and divide by a
        floor-clamped standard deviation.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Full preprocessing pipeline: RGB-convert, optional VQA header
        rendering, normalization, and patch flattening with attention masks.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('''data_format''', None) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are ''')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('''A header text must be provided for VQA models.''')
            font_bytes = kwargs.pop('''font_bytes''', None)
            font_path = kwargs.pop('''font_path''', None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
| 41 | 1 |
'''simple docstring'''
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
# Module-level logger (bound to an obfuscated name by the transformation).
lowerCAmelCase_ : List[Any] = get_logger(__name__)
class UpperCamelCase__:
    """Mock download manager that resolves dataset URLs to paths inside local
    or GitHub-hosted "dummy data" zip archives instead of downloading.

    Method and attribute names are restored from the internal references the
    obfuscation left intact (``self.dummy_file_name``,
    ``self.datasets_scripts_dir``, ``self.download_and_extract``,
    ``self.create_dummy_data_*``); the obfuscated ``__init__`` repeated one
    parameter name, which is a SyntaxError.
    """

    # NOTE(review): the obfuscated source bound all three class attributes to
    # one name, clobbering the first two that the methods below read.
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: "Union[Version, str]",
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Lazily download/extract the dummy archive once and cache the path.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (or locate) the dummy archive and return the extracted path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # NOTE(review): property name per upstream `datasets`; it is not
        # referenced inside this chunk — confirm against external callers.
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Resolve ``data_url`` (str, list/tuple, or dict) to dummy-data paths."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        """Map each key to dummy path(s) derived from the last URL component."""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        """Map each URL in a list to a dummy path; shard lists collapse to one."""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield ``(relative_posix_path, open_file)`` pairs for archive members."""
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield regular files under ``paths``, skipping hidden/dunder names."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 715 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort and return the array.

    ``end == 0`` means "to the end of the array". Names restored: the
    obfuscated signature repeated one parameter name (a SyntaxError) while the
    body read the original names.
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted there satisfies the
    max-heap property within the first ``heap_size`` elements.
    """
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def _lowerCamelCase (__lowerCamelCase : list ) -> list:
a__ = len(__lowerCamelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(n - 1 , 0 , -1 ):
a__ , a__ = array[0], array[i]
heapify(__lowerCamelCase , 0 , __lowerCamelCase )
return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three values at the given indices.

    Name restored per the canonical introsort helper (``3`` was mangled to
    ``a`` by the obfuscation); the recursive driver below selects its pivot
    with this function.
    """
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Elements smaller than the pivot end up left of the returned index,
    elements larger end up right of it.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: sort ``array`` in place and return it.

    The recursion depth limit is ``2 * ceil(log2(n))``; runs of 16 or fewer
    elements are finished with insertion sort (the ``main`` block below calls
    this function by this name).
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort core.

    Quicksorts ``array[start:end]`` with median-of-three pivots, falls back to
    heapsort when ``max_depth`` hits zero, and finishes small runs with
    insertion sort.
    """
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Fix for an obfuscation bug: both assignments were bound to one junk name
    # while the following lines read ``user_input`` and ``unsorted``.
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 289 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Names restored: both tuples were bound to a single obfuscated name (the
# second overwriting the first) while the demo at the bottom of this module
# reads ``test_data_odd`` and ``test_data_even``.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A singly-linked-list node.

    Class name restored: the list implementation below this block constructs
    nodes as ``Node(...)``. Field names are restored from the traversal code
    (``node.data`` / ``node.next_node``) — the obfuscated source gave both
    fields one name, collapsing them.
    """

    # Payload stored in this node.
    data: int
    # Link to the next node, or None at the tail.
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list that stores its integers in ascending order.

    Class name restored: the module's ``__main__`` block references
    ``SortedLinkedList``. Two obfuscation bugs fixed in ``__init__``: the
    iterable itself was passed as ``reverse=`` (restored to ``reverse=True``
    so prepending yields ascending order), and ``__str__`` stringified the
    constructor argument instead of each element.
    """

    def __init__(self, A) -> None:
        # ``A``: iterable of ints (obfuscated parameter name kept for
        # interface compatibility with positional/keyword callers).
        self.head: Node | None = None
        # Insert in descending order so repeated head-prepends end ascending.
        for i in sorted(A, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one, sll_two):
    """Merge two sorted linked lists into a new ``SortedLinkedList``.

    Name restored: the module's ``__main__`` block calls ``merge_lists``; the
    obfuscated signature also repeated one parameter name (a SyntaxError).
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))


# Backward-compatible alias preserving the obfuscated module-level binding.
lowerCAmelCase = merge_lists
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # ``SSL`` is the short alias the demo line below uses; the obfuscated
    # source bound the class to a different name, breaking the call.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 11 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all board positions a knight at ``position`` can reach on an
    ``n x n`` board.

    Names restored: the obfuscated signature repeated one parameter name
    (a SyntaxError); the helper below calls this function as ``get_valid_pos``.
    """
    y, x = position
    # The eight L-shaped knight moves.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Return True when every cell of ``board`` has been visited (non-zero)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step for the open knight's tour.

    Tries every knight move from ``pos``; on success the board holds the full
    move numbering, otherwise the tentative move is undone (reset to 0).
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Backtrack: un-mark the cell before trying the next move.
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an ``n x n`` board.

    Returns a board of visit numbers (1..n*n); raises ``ValueError`` when no
    tour exists. The error-message typo "Kight" is fixed to "Knight".
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


# Backward-compatible alias preserving the obfuscated module-level binding
# (every function in this chunk had been renamed to ``A__``; the last
# definition won).
A__ = open_knight_tour
# Run this module's doctests when invoked directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 195 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a__:
    """Builds tiny UMT5 configs and inputs for the model tests below.

    NOTE(review): the original obfuscation repeated one parameter name in every
    signature (a SyntaxError), gave every method the same name (so all but the
    last were shadowed) and assigned attributes to throwaway locals instead of
    ``self``. Real names are reconstructed from the call sites in this file.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        """Full-size config, used by some of the common tests."""
        return TaConfig.from_pretrained('google/umt5-base')

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """Fill in default masks for any mask arguments that were not provided."""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        """Return a tiny config plus a dict of random (pad-free) model inputs."""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        """Config used by pipeline tests; larger vocab so pipeline tokens all fit."""
        return TaConfig(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Forward the model and check output shapes and past-key-value layout."""
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches uncached decoding."""
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def create_and_check_model_fpaa_forward(
        self,
        config,
        input_dict,
    ):
        """Half-precision forward pass must not produce NaNs."""
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())


# Name used by the test class below (the class name `a__` is shadowed by the
# later classes in this module, so keep a stable module-level binding).
UMTaModelTester = a__
@require_torch
class a__(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the tiny UMT5 models.

    NOTE(review): the original obfuscation left the mixin bases and all class
    attributes under shared placeholder names; real names restored from the
    imports above and the ModelTesterMixin API.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip('Test has a segmentation fault on torch 1.8.0')
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"""{tmpdirname}/t5_test.onnx""", export_params=True, opset_version=9, input_names=['input_ids', 'decoder_input_ids'], )

    @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision')
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        """Generating with an all-zero head mask must produce zero attention weights."""
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]['input_ids'], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a__(unittest.TestCase):
    """Slow integration test for google/umt5-small (currently skipped upstream)."""

    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
    def test_small_integration_test(self):
        # NOTE(review): the original obfuscation assigned every intermediate to the
        # same throwaway local; real variable names restored. All model/tokenizer
        # identifiers, prompts and expected outputs are kept byte-for-byte.
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 581 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def a() -> None:
    """Simulate a CUDA out-of-memory error (used by the batch-size-finder tests).

    NOTE(review): the original annotated this ``-> Tuple`` without importing
    ``typing.Tuple``, which would raise ``NameError`` at import time; the
    function never returns a value anyway.
    """
    raise RuntimeError('CUDA out of memory.')


# Backward-compatible alias: the test methods in this file call this name.
raise_fake_out_of_memory = a
class a__(nn.Module):
    """Tiny 3 -> 4 -> 5 MLP with batch-norm, used only to allocate GPU memory in tests."""

    def __init__(self) -> None:
        super().__init__()
        # NOTE(review): the original assigned the layers to throwaway locals
        # (never to `self`) and referenced the nonexistent `nn.BatchNormad`;
        # restored to a working module that matches the forward pass below.
        self.lineara = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linearb = nn.Linear(4, 5)

    def forward(self, x):
        """Apply linear(3->4) -> batchnorm -> linear(4->5)."""
        return self.linearb(self.batchnorm(self.lineara(x)))

    # Backward-compatible alias for the obfuscated method name.
    _lowercase = forward


# Name used by the release_memory test below.
ModelForTest = a__
class a__(unittest.TestCase):
    """Tests for `find_executable_batch_size` and `release_memory`.

    NOTE(review): the original obfuscation left `nonlocal batch_sizes` without a
    matching binding (a SyntaxError), referenced the undefined names
    `batch_size`, `bs` and `arga`, and dropped the exception classes from the
    `assertRaises` calls; all restored here.
    """

    def test_memory_implicit(self):
        # The decorator should halve the batch size on every fake OOM until it fits.
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        # Extra positional arguments must be forwarded unchanged.
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function('hello')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, 'hello'])

    def test_start_zero(self):
        # Starting at zero can never succeed.
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_approach_zero(self):
        # Every positive batch size OOMs, so the search must bottom out at zero.
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_verbose_guard(self):
        # Passing batch_size explicitly must raise a helpful TypeError.
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga, argb):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, 'hello', 'world')
        self.assertIn('Batch size was passed into `f`', cm.exception.args[0])
        self.assertIn('`f(arg1=\'hello\', arg2=\'world\')', cm.exception.args[0])

    def test_any_other_error(self):
        # Non-OOM exceptions must propagate untouched.
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('Oops, we had an error!')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        # `release_memory` should return CUDA allocation to the pre-model level.
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 581 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the minimax value of a perfect binary game tree.

    `scores` holds the leaf values (length must be a power of two) and
    `height` is log2(len(scores)).

    NOTE(review): the original defined both functions in this file under one
    obfuscated name and recursed on the unbound name `minimax`; the real names
    are restored (the old name stays bound below for backward compatibility).

    Raises:
        ValueError: if `depth` is negative or `scores` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        # Leaf level: return the score directly.
        return scores[node_index]
    # The maximizer takes the larger child value, the minimizer the smaller;
    # the player to move alternates at every level.
    return (
        max(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
    )


def main() -> None:
    """Demo: print the optimal value (65) for a fixed 8-leaf tree."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")


# Backward-compatible binding: the obfuscated name previously ended up bound
# to the second definition in this file.
SCREAMING_SNAKE_CASE__ = main


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 587 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCamelCase : int = None
# Module-wide logger.
logger = logging.get_logger(__name__)

# NOTE(review): every constant below was obfuscated to one shared name, so
# later assignments shadowed earlier ones and the class references
# (VOCAB_FILES_NAMES etc.) were unresolved; the real names are restored.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

# No hard positional-embedding limit for these checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# SentencePiece's word-initial whitespace marker.
SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __lowercase (PreTrainedTokenizerFast):
    """Fast XLNet tokenizer, backed by HuggingFace's *tokenizers* library.

    NOTE(review): the original obfuscation repeated the parameter name `A` in
    every signature (a SyntaxError), left the base class as the undefined name
    `UpperCamelCase__`, and gave all class attributes and methods one shared
    name each; the real names are restored from the imports above and the
    `PreTrainedTokenizerFast` API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],  # mutable default kept to match the slow tokenizer's API
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )

        self._pad_token_type_id = 3  # XLNet pads with segment id 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """XLNet format: ``X <sep> <cls>`` for one sequence, ``A <sep> B <sep> <cls>`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a_a + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """Segment ids: 0 for the first sequence, 1 for the second, 2 for ``<cls>``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_a_a is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the slow tokenizer's SentencePiece model into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 587 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-wide logger.
logger = logging.get_logger(__name__)

# NOTE(review): both module constants were obfuscated to one shared name, so
# the archive map shadowed the logger; real names restored.
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class __UpperCAmelCase(PretrainedConfig):
    """Configuration class for DistilBERT-style models.

    NOTE(review): the original obfuscation repeated the parameter name in the
    `__init__` signature (a SyntaxError), assigned all attributes to a
    throwaway local instead of ``self``, duplicated the two class attributes
    under one name, and left the base class as the undefined `_lowerCamelCase`;
    real names restored from the `PretrainedConfig` API.
    """

    model_type = "distilbert"
    # Maps the generic HF attribute names onto DistilBERT's own names.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class __UpperCAmelCase(OnnxConfig):
    """ONNX export configuration for DistilBERT models.

    NOTE(review): this class shares its (obfuscated) name with the config class
    above and therefore shadows it at module level; renaming is left to a
    follow-up since the name itself is the module's current interface.
    """

    def lowerCamelCase(self):
        """Return the mapping of model input names to their dynamic ONNX axes."""
        # NOTE(review): the original stored the axes dict in a throwaway local and
        # then referenced the undefined name `dynamic_axis` (NameError); fixed.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    # The OnnxConfig API expects `inputs` as a property; expose the same logic
    # there while keeping the original method callable for backward compatibility.
    inputs = property(lowerCamelCase)
| 542 |
'''simple docstring'''
import math
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> float:
if (
not isinstance(__A , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> float:
if (
not isinstance(__A , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 542 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCAmelCase(SeqaSeqTrainer):
    """Seq2Seq `Trainer` specialized for question answering: runs generation-based
    evaluation/prediction and post-processes raw predictions into QA metrics.

    NOTE(review): the original obfuscation repeated one parameter name in every
    signature (a SyntaxError), left the base class as the undefined name `a`,
    and gave both public methods the same name (so `evaluate` was shadowed by
    `predict`); the real `Trainer` API names are restored.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) eval examples and the hook that turns raw model
        # output into (predictions, references) for `compute_metrics`.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """Run generation-based evaluation and return the (prefixed) metrics dict."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the TrainingArguments generation settings when not given.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F'''{metric_key_prefix}_'''):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        """Run generation-based prediction; returns a `PredictionOutput` with metrics."""
        gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F'''{metric_key_prefix}_'''):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 283 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-wide logger.
logger = logging.get_logger(__name__)

# NOTE(review): both constants were obfuscated to one shared name, so the
# archive map shadowed the logger; real names restored.
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for a Wav2Vec2 model.

    Holds the hyper-parameters of the convolutional feature extractor, the
    transformer encoder, SpecAugment masking, the pre-training quantizer, the
    CTC head, the optional adapter, and the sequence-classification / x-vector
    heads.

    The original text named every ``__init__`` parameter identically (a
    SyntaxError) and used an undefined base class; the parameter names below
    are reconstructed from the attribute assignments in the body, and the base
    class from the module's ``PretrainedConfig`` import.
    """

    # Identifier used by the auto-config machinery to look this config up.
    model_type = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        # Special-token ids are handled by the base config.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Convolutional feature-extractor geometry; the three lists must agree.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def lowerCAmelCase__(self):
        """Product of all conv strides: input samples consumed per output logit."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 283 | 1 |
"""Lazy import structure for the LXMERT model.

Only the configuration and the slow tokenizer are registered eagerly; the
fast tokenizer, the PyTorch models and the TensorFlow models are added only
when their optional dependencies are installed.

The original text bound every structure to one name (overwriting the import
table at each step) and passed an undefined ``_import_structure`` to
``_LazyModule`` without installing the result; both are repaired here.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always importable: configuration and slow tokenizer.
_import_structure = {
    'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
    'tokenization_lxmert': ['LxmertTokenizer'],
}

# Fast tokenizer requires the optional `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_lxmert'] = [
        'LxmertEncoder',
        'LxmertForPreTraining',
        'LxmertForQuestionAnswering',
        'LxmertModel',
        'LxmertPreTrainedModel',
        'LxmertVisualFeatureEncoder',
        'LxmertXLayer',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_lxmert'] = [
        'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLxmertForPreTraining',
        'TFLxmertMainLayer',
        'TFLxmertModel',
        'TFLxmertPreTrainedModel',
        'TFLxmertVisualFeatureEncoder',
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 438 | '''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Map from released checkpoint name to its hosted config.json.
# NOTE(review): the original text bound both this map and the logger to the
# same name, so the map was immediately clobbered; distinct names keep both.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class UpperCAmelCase__ ( PretrainedConfig ):
    """Configuration for a MaskFormer model: a vision backbone plus a
    DETR-style transformer decoder and the Hungarian-matcher loss weights.

    The original text named all four class attributes and every ``__init__``
    parameter identically (a SyntaxError) and used an undefined base class;
    names are reconstructed from the body's ``self.backbones_supported`` /
    ``self.decoders_supported`` lookups and the module's ``PretrainedConfig``
    import.
    """

    model_type = '''maskformer'''
    # `hidden_size` requests are transparently served by `mask_feature_size`.
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    # Backbone / decoder families this config knows how to validate.
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']

    def __init__(
        self,
        fpn_feature_size=256,
        mask_feature_size=256,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(backbone_config, dict):
            # Re-hydrate a serialized sub-config through the auto-config registry.
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
                f'''Supported model types: {",".join(self.backbones_supported)}''' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {",".join(self.decoders_supported)}''' )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror the decoder's transformer geometry at the top level.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        """Build a config directly from already-instantiated sub-configs."""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 438 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( number_of_steps: int ) -> int:
    """Count the distinct ways to climb ``number_of_steps`` stairs taking
    1 or 2 steps at a time (the Fibonacci recurrence).

    The original text collapsed both tuple-unpacking targets onto a single
    name, leaving ``current`` / ``previous`` undefined; the two-variable
    unpacking is restored here.

    Raises:
        AssertionError: if ``number_of_steps`` is not a positive integer.
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        # ways(n) = ways(n-1) + ways(n-2)
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 586 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `KandinskyVaaInpaintPipeline` using tiny dummy models.

    The original text named the class after its own (undefined) base, gave
    every member the same name (shadowing all but the last) and used duplicate
    parameter names in ``get_dummy_inputs`` (a SyntaxError); names are
    reconstructed from the ``self.*`` references in the method bodies.
    """

    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet: 9 input channels (4 latent + 4 masked-image latent + 1 mask)."""
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Assemble the tiny unet / scheduler / movq the pipeline needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='''linear''',
            beta_start=0.0_0085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='''epsilon''',
            thresholding=False,
        )
        return {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs (embeddings, init image, mask) for `device`."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # NOTE(review): reconstructed — upstream zeroes the top-left quadrant; confirm.
        mask[:32, :32] = 0
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }

    def test_kandinsky_inpaint(self):
        """Two-step CPU run: check output shape and a pinned corner slice."""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F'''image.shape {image.shape}''')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def test_inference_batch_single_identical(self):
        # Loosen the mixin's tolerance for this pipeline.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the real Kandinsky 2.2 checkpoints.

    The original text reused the fast-test class name, named both methods
    identically, and annotated a tuple-unpacking target (a SyntaxError);
    local names are reconstructed from the keyword arguments they feed.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): reconstructed — upstream zeroes the band where the hat
        # is painted; confirm the slice against the reference image.
        mask[:250, 250:-250] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''',
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type='''np''',
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 445 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map one original GroupViT checkpoint parameter name to its HF equivalent.

    The function name and parameter are restored from the call site
    (``rename_key(...)``) and the body's ``name`` references, which the
    original text had mangled. Order of the substitutions matters: e.g. the
    ``text_encoder.*`` rewrites must run before the bare ``text_encoder``
    replacement.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    """Convert a GroupViT state dict (in place) to HF naming.

    Fused vision ``qkv`` and text ``in_proj`` matrices are split into separate
    q/k/v projections; every other key is renamed via :func:`rename_key`.

    NOTE(review): the original text collapsed all locals onto one name and lost
    the dict-subscript assignment targets; the destination key templates below
    follow the upstream GroupViT conversion script — verify against it.

    Args:
        orig_state_dict: checkpoint state dict; mutated and returned.
        config: model config providing ``vision_config.hidden_size`` and
            ``text_config.hidden_size``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # vision attention: split the fused qkv matrix/vector three ways
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # text attention: same split for the fused in_proj matrix/vector
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary: projection MLP weights carry a spurious axis
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the response's raw file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Convert an original GroupViT checkpoint to the HF format and verify it.

    Loads the raw checkpoint, renames/splits its weights with
    :func:`convert_state_dict`, checks the resulting logits on a reference
    image against known-good values, then saves (and optionally pushes) the
    processor and model.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for model + processor.
        model_name: one of ``groupvit-gcc-yfcc`` / ``groupvit-gcc-redcaps``.
        push_to_hub: also push the converted artifacts to the hub.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")['''model''']
    new_state_dict = convert_state_dict(state_dict, config)
    # strict=False: position_ids is generated, not stored in the checkpoint
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(F"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
    )
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
    parser.add_argument(
        '''--model_name''',
        # Fixed from the upstream typo "groupvit-gccy-fcc", which would fail
        # the converter's own model-name check.
        default='''groupvit-gcc-yfcc''',
        type=str,
        help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 718 |
"""Lazy import structure for the VisionTextDualEncoder model.

The configuration and processor are always importable; the PyTorch, Flax and
TensorFlow model classes are registered only when their frameworks are
installed. The original text overwrote the import table at each step and
never installed the lazy module into ``sys.modules``; both are repaired here.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Always importable: configuration and processor.
_import_structure = {
    '''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
    '''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vision_text_dual_encoder'''] = ['''VisionTextDualEncoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_vision_text_dual_encoder'''] = ['''FlaxVisionTextDualEncoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vision_text_dual_encoder'''] = ['''TFVisionTextDualEncoderModel''']

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 399 | 0 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
# Metric documentation strings. The original text bound all three to the same
# name while the Metric class below references _CITATION / _DESCRIPTION /
# _KWARGS_DESCRIPTION; the names are restored from those references.
_CITATION = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
_DESCRIPTION = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels, as a plain float.

    Both arguments are element-wise-comparable arrays of the same shape.
    (Name and parameters restored from the call sites below; the original
    text gave both parameters the same name, a SyntaxError.)
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return accuracy together with the F1 score as a dict.

    Uses :func:`simple_accuracy` for accuracy and the module's imported
    ``fa_score`` (sklearn's f1_score) for F1. (Name and parameters restored
    from the metric class's call site; the original text gave both
    parameters the same name, a SyntaxError.)
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for aligned cross-lingual sentence retrieval.

    For each English sentence vector, the 10 nearest Indic vectors by cosine
    distance (after mean-centering both sides) are retrieved; a hit is when
    the vector at the same index is among them. Returns the hit rate as a
    float. (Name and parameters restored from the metric class's call site;
    the original text gave both parameters the same name, a SyntaxError.)
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase ( datasets.Metric ):
    """IndicGLUE benchmark metric.

    Dispatches on ``self.config_name``: precision@10 for ``cvit-mkb-clsr``,
    accuracy + F1 for ``wiki-ner``, plain accuracy for the remaining
    classification tasks. (The original text named both methods identically
    and duplicated ``_compute``'s parameter names — a SyntaxError; the
    ``datasets.Metric`` API names ``_info`` / ``_compute(predictions,
    references)`` are restored.)
    """

    def _info(self):
        """Declare the accepted feature types for the selected configuration."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '''
                '''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '''
                '''\"wiki-ner\"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr is retrieval: inputs are float vectors,
                    # every other task uses integer class ids.
                    '''predictions''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                    '''references''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                }),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None,
        )

    def _compute(self, predictions, references):
        """Score `predictions` against `references` for the configured task."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '''
                '''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '''
                '''\"wiki-ner\"]''' )
| 152 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger keyed to this module's import path.
__lowerCamelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( BaseImageProcessor ):
    r"""LeViT-style image processor: resize -> center-crop -> rescale -> normalize.

    NOTE(review): in the mangled original the base class was the undefined name
    ``A__``, every method was bound to one identifier (so only the last
    survived), and all ``self.`` attribute writes were lost.  Method and
    attribute names are restored to the ``resize`` / ``center_crop`` /
    ``rescale`` / ``normalize`` / ``preprocess`` API that
    ``BaseImageProcessor`` expects.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # presumably default_to_square=False for a shortest-edge size — the
        # mangled original obscured the flag; confirm against upstream.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``.  A ``{"shortest_edge": s}`` size rescales the short
        side to ``int(256/224 * s)`` keeping aspect ratio; ``{"height","width"}``
        resizes exactly."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            # Resize so a subsequent 224 center-crop keeps the usual 256->224 ratio.
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch; per-call arguments
        override the defaults stored on the instance."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 323 | 0 |
# Author credit for this module (conventionally spelled `__author__`).
_A = 'Alexander Joslin'
import operator as op
from .stack import Stack
def __SCREAMING_SNAKE_CASE(equation: str) -> int:
    """Evaluate a fully parenthesised infix expression of single digits with
    Dijkstra's two-stack algorithm.

    Args:
        equation: expression such as "(5 + ((4 * 2) * (2 + 3)))"; every
            operator application must be wrapped in parentheses and every
            operand is a single digit.

    Returns:
        The value of the expression (int, or float when '/' is involved).

    Note: the mangled original bound all three setup locals to one name and
    then referenced the undefined ``operand_stack`` / ``operator_stack`` /
    ``operators``; distinct names are restored.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for char in equation:
        if char.isdigit():
            # RULE 1: operands go on the operand stack.
            operand_stack.push(int(char))
        elif char in operators:
            # RULE 2: operators go on the operator stack.
            operator_stack.push(char)
        elif char == ")":
            # RULE 4: on ')', apply the top operator to the top two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            num_right = operand_stack.peek()
            operand_stack.pop()
            num_left = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_left, num_right)
            operand_stack.push(total)
    # RULE 5: the sole remaining operand is the result.
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo entry point. The original referenced the undefined names `equation`
    # and `dijkstras_two_stack_algorithm` and carried trailing non-Python junk.
    _A = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{_A} = {__SCREAMING_SNAKE_CASE(_A)}")
import math
# Project Euler 493 setup: an urn with 7 colours x 10 balls each.
# (The mangled original bound all three values to `_A`, leaving the third
# line referencing undefined names.)
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def __SCREAMING_SNAKE_CASE(total_picked: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours when drawing
    ``total_picked`` balls (without replacement) from NUM_BALLS balls in
    NUM_COLOURS colours.

    Uses linearity of expectation: each colour is present unless all picked
    balls avoid it, so E = NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR,
    total_picked) / C(NUM_BALLS, total_picked)).

    Returns:
        The expectation formatted to nine decimal places.
    """
    total = math.comb(NUM_BALLS, total_picked)
    # Ways to pick while missing one particular colour entirely.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, total_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
    # `solution` was never defined; the expectation function above is the
    # module's entry point. (Trailing dataset-artifact junk removed.)
    print(__SCREAMING_SNAKE_CASE(20))
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
# Must be named `logger`: the tokenizer below calls `logger.warning_once`
# and `logger.error` (the mangled original bound it to a name the constant
# definitions then clobbered).
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE__ = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( PreTrainedTokenizer ):
    """
    T5 tokenizer backed by SentencePiece, with `<extra_id_N>` sentinel tokens
    appended to the top of the vocabulary.

    NOTE(review): the mangled original subclassed the undefined name
    ``UpperCAmelCase_``, declared duplicate parameter names (a SyntaxError),
    and bound almost every method to the single identifier ``_a``; names are
    restored to the ``PreTrainedTokenizer`` contract the bodies implement.
    """

    # Class-level metadata consumed by the `from_pretrained` machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        """Load ``vocab_file`` with SentencePiece and register ``extra_ids`` sentinels."""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @classmethod
    def _eventually_correct_t5_max_length(cls, pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Work around historic T5 checkpoints shipping a wrong model max length.

        Made a classmethod (the mangled staticmethod referenced the undefined
        class name ``TaTokenizer``) so the lookup below is self-contained;
        callers still pass the same three arguments.
        """
        if pretrained_model_name_or_path in cls.max_model_input_sizes:
            deprecated_max_model_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        # SentencePiece pieces plus the appended <extra_id_*> sentinels.
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        """Return token -> id for the full vocabulary, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token (one trailing </s> per sequence)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        """Return the `<extra_id_N>` tokens among the additional special tokens.

        Bug fix: the original predicate was ``bool(re.search(...)) is not None``,
        which is always True and therefore returned *every* additional special
        token, not just the sentinels.
        """
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens)) )

    def get_sentinel_token_ids(self):
        """Ids of the sentinel tokens."""
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Append the EOS id unless the sequence already ends with it (warns then)."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token types: an all-zero mask covering sequence(s) + EOS."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """`X </s>` for one sequence, `A </s> B </s>` for a pair."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload later.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """Tokenize; in non-legacy mode keep SPIECE_UNDERLINE only as a leading marker."""
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        """Run SentencePiece; in non-legacy mode strip the artificial leading underline."""
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Sentinels map to the top of the vocabulary; others go through SentencePiece."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Inverse of `_convert_token_to_id`."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens, decoding runs of ordinary pieces around special tokens."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 9 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger. Renamed: the mangled original reused a single name for the
# logger, the argument parser and the parsed args, clobbering it.
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_name, new_name) pairs mapping DiT checkpoint keys to the HF
    BEiT naming scheme.

    Args:
        config: model config; only ``num_hidden_layers`` is read.
        has_lm_head: True for the self-supervised checkpoints (mask token +
            final layernorm), False for the RVL-CDIP classifier head.
        is_semantic: prefix every encoder key with "backbone." (segmentation).

    Returns:
        list of (source_key, destination_key) tuples.

    Name restored from the mangled ``A``: the converter below calls
    ``create_rename_keys`` explicitly.
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ] )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value
    entries, and move the layer-scale gammas, mutating ``state_dict`` in place.

    NOTE(review): the mangled original dropped the destination-key
    assignments; the ``beit.encoder.layer.*`` targets are restored to match
    the naming scheme used by ``create_rename_keys`` — confirm against the
    upstream DiT conversion script.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values (key has no bias in BEiT)
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (the mangled original popped
    the value but never wrote it back under the new key)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO "cats" verification image (network access
    required); returns a PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint at ``checkpoint_url`` into HF BEiT format,
    verify the logits shape on a test image, and save (optionally push) it.

    Name restored from the mangled ``A`` — the ``__main__`` guard calls
    ``convert_dit_checkpoint`` explicitly.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # The mangled original bound the parser and parsed args to the same name
    # and then referenced the undefined `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Smoke tests for TatoebaConverter (requires a local Tatoeba checkout).

    NOTE(review): the skip condition referenced the undefined name
    ``lowercase`` (restored to the imported ``DEFAULT_REPO``) and all three
    methods shared one mangled name; the cached_property must be named
    ``resolver`` because the test bodies read ``self.resolver``.
    """

    @cached_property
    def resolver(self):
        # Fresh temp dir per test run so converted models don't pollute the repo.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 129 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCAmelCase = "sshleifer/bart-tiny-random"
__lowerCAmelCase = "patrickvonplaten/t5-tiny-random"
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Tuple ):
return AutoConfig.from_pretrained(__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=__UpperCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase__ ( self : str ):
with self.assertRaises(__UpperCamelCase ):
create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=__UpperCamelCase , d=__UpperCamelCase )
| 129 | 1 |
import os
def _snake_case(filename: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum across a comma-separated matrix,
    moving from any cell in the left column to any cell in the right column
    using only right, up and down steps.

    Args:
        filename: matrix file located next to this script (the mangled
            original took ``dirname`` of the filename itself, making the
            lookup depend on the current working directory).

    Returns:
        The minimal path sum as an int.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # Left column: a path starting there costs just that cell.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # Step right from the previous column...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax downward moves (from above)...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # ...and upward moves (from below).
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    # Best exit anywhere in the rightmost column.
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # `solution` was never defined; the solver above is the entry point.
    print(f"{_snake_case() = }")
| 10 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module logger, renamed from the mangled shared name `A` (which the archive
# map defined immediately below would otherwise clobber).
logger = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for the released CodeGen models.
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''Configuration for CodeGen models (vocab/embedding sizes, depth, heads,
    rotary dim, dropout rates, etc.).

    NOTE(review): the mangled original subclassed an undefined name (restored
    to the imported ``PretrainedConfig``), declared every ``__init__``
    parameter with the same name (a SyntaxError), and bound all locals to one
    identifier instead of writing ``self.`` attributes.
    '''

    model_type = "codegen"
    # Maps the generic config attribute names onto CodeGen's GPT-style names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.0_2,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen-style models, with optional
    past key/value (incremental decoding) inputs.

    All five members below previously shared the single name
    ``_lowerCamelCase``, so only the last survived and the ``OnnxConfig``
    contract names (``inputs``, ``num_layers``, ...) were missing.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for tracing, optionally including zero-filled
        past key/value tensors and an attention mask extended to cover them.

        Raises:
            ValueError: if ``use_past`` is set but PyTorch is unavailable.
        """
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask over the past positions, matching its dtype.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the installed torch distribution, computed once at import
# time so repeated checks do not re-query package metadata.
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version, operation, requirement_version):
    """Compare a library's version against a requirement.

    Args:
        library_or_version: an installed package name (its version is looked
            up via ``importlib.metadata``) or an already-parsed ``Version``.
        operation: comparison name; must be a key of ``STR_OPERATION_TO_FUNC``.
        requirement_version: version string to compare against.

    Returns:
        bool: result of ``library_version <operation> requirement_version``.

    Raises:
        ValueError: if ``operation`` is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation, version):
    """Convenience wrapper: compare the installed torch version.

    Previously both functions here were named ``A`` (the second shadowing the
    first) while this body called the nonexistent ``compare_versions``.
    """
    return compare_versions(torch_version, operation, version)
| 34 | from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when iterating a linked list whose ``next_node`` chain cycles.

    Must carry this exact name: the list iterator below raises
    ``ContainsLoopError`` (the previous scrambled name and its undefined
    base class made that a NameError).
    """
class Node:
    """A singly linked list node able to detect whether the chain of
    ``next_node`` pointers starting at it contains a loop."""

    def __init__(self, data: Any) -> None:
        self.data = data
        # Next node in the list; None terminates the chain.
        self.next_node = None

    def __iter__(self):
        """Yield each node's data; raise ContainsLoopError on a revisit.

        Previously ``visited.append`` received an undefined name instead of
        the current node, so loop detection crashed with a NameError.
        """
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True when following ``next_node`` pointers revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    # Straight chain 1 -> 2 -> 3 -> 4: no loop. (The previous demo bound
    # every node to the same module-level name, so no list was ever built
    # and the printed comments could not match.)
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    # Point the tail back at the second node to create a cycle.
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    # Duplicate *data* values (5, 6, 5, 6) do not constitute a loop —
    # loop detection compares node identity, not data.
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    # Single node, no successor.
    root_node = Node(1)
    print(root_node.has_loop)  # False
| 34 | 1 |
_UpperCamelCase : List[Any] =[
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_UpperCamelCase : Optional[Any] =[
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_UpperCamelCase : Any =[
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_UpperCamelCase : Tuple =[
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_UpperCamelCase : Optional[int] =[
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_UpperCamelCase : Optional[int] =[
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_UpperCamelCase : List[str] =[
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_UpperCamelCase : List[Any] =[
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 206 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger. The conversion functions below refer to it as ``logger``,
# so it must be bound under that name (it was previously assigned to ``__A``,
# making every ``logger.info`` call a NameError).
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Translate original GLPN checkpoint keys into the HF GLPN naming.

    Each key is rewritten through an ordered sequence of substring
    substitutions; the order matters (e.g. ``attn.q`` must be handled
    before the generic ``attn`` rule). Returns a new OrderedDict mapping
    the rewritten keys to the untouched values.

    Previously the output dict, the mutated key and the index character all
    shared one local name, so the dict was clobbered and nothing was stored.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each encoder block's fused key/value projection in place.

    The original checkpoint stores K and V stacked in a single ``kv``
    matrix; HF GLPN expects separate ``key`` and ``value`` projections.
    Previously the slices were assigned to throwaway locals instead of being
    written back into ``state_dict``, so the function only deleted entries.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict;
            # the first hidden_sizes[i] rows are K, the remainder is V
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO test image used to smoke-test the model.

    Must be named ``prepare_img``: the conversion entry point calls it by
    that name (the previous scrambled name made that a NameError).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the raw response.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint into the HF format.

    Loads the checkpoint, renames its keys, splits the fused kv projections,
    runs a forward pass on a test image, checks the predicted depth against
    reference values for known model names, and optionally pushes the result
    to the hub. Must be named ``convert_glpn_checkpoint`` — the CLI entry
    point below calls it by that name.
    """
    glpn_config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, glpn_config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(glpn_config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against reference slices for the released checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # Previously the parser and parsed args were bound to ``__A`` while the
    # code referenced ``parser``/``args``, so the script crashed immediately.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Submodule -> public-symbol map consumed by _LazyModule. Framework-specific
# entries are appended below only when the corresponding backend is
# installed. Previously every binding here reused one scrambled name, so the
# dict was clobbered by the later lists and the final _LazyModule call
# referenced an undefined ``_import_structure``.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a LoRA .safetensors checkpoint into a Stable Diffusion pipeline.

    For every LoRA (up, down) pair, ``alpha * up @ down`` is added directly
    onto the corresponding base-model weight. Returns the patched pipeline.

    Must be named ``convert`` (the CLI entry point calls it by that name);
    the previous version also declared all five parameters under one name —
    a SyntaxError — and read the prefixes from undefined globals.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # keys look like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight";
        # alpha entries are folded in beforehand and up/down pairs are handled
        # together, so skip both.
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # walk the attribute path; fragments that fail are glued back together
        # with "_" because module names may themselves contain underscores
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # collect the (up, down) pair for this layer, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight; conv LoRA weights carry two trailing singleton dims
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list so the partner key is skipped
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    # Previously the parser, args and every intermediate value were bound to
    # one scrambled name while being referenced under their real names, so
    # the script failed with NameErrors before doing any work.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    pipe = convert(
        args.base_model_path,
        args.checkpoint_path,
        args.lora_prefix_unet,
        args.lora_prefix_text_encoder,
        args.alpha,
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
def prefix_function(input_string: str) -> list[int]:
    """Compute the Knuth-Morris-Pratt prefix function of ``input_string``.

    ``result[i]`` is the length of the longest proper prefix of
    ``input_string[: i + 1]`` that is also a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last result for better performance (dynamic programming),
        # falling back along the prefix links until the characters match
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest proper prefix that is also a suffix
    of some prefix of ``input_str`` (0 for the empty string).

    Previously both functions here shared one name (the second shadowed the
    first) and this body called the nonexistent ``prefix_function``.

    >>> longest_prefix("aabcdaabc")
    4
    """
    # default=0 also covers the empty string, which used to raise ValueError
    return max(prefix_function(input_str), default=0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 10 |
"""simple docstring"""
import os
# Precomputed list of the first 100 triangular numbers T_n = n(n+1)/2.
# Must be bound under this name: solution() below reads TRIANGULAR_NUMBERS
# (the previous scrambled binding made that a NameError).
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def solution() -> int:
    """Project Euler 42: count the triangle words in ``words.txt``.

    A word's value is the sum of the alphabetical positions of its letters
    (A=1 ... Z=26); the word is a triangle word when that value is a
    triangular number. Must be named ``solution`` — the entry point below
    calls it by that name; the previous version also read the file path and
    character codes from undefined names.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')

    with open(words_file_path) as f:
        words_line = f.readline()

    words = [word.strip('"') for word in words_line.strip('\r\n').split(',')]
    # set membership is O(1) versus the original O(n) list scans
    triangular_numbers = set(TRIANGULAR_NUMBERS)
    word_values = (sum(ord(ch) - 64 for ch in word) for word in words)
    return len([value for value in word_values if value in triangular_numbers])


if __name__ == "__main__":
    print(solution())
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Submodule -> public-symbol map consumed by _LazyModule; torch-only entries
# are appended below when the backend is installed. Previously every binding
# here reused the single name ``A``, so the model list clobbered this dict
# and the final _LazyModule call referenced an undefined ``_import_structure``.
_import_structure = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_canine'] = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Submodule -> public-symbol map consumed by _LazyModule; vision- and
# torch-only entries are appended when the backends are installed.
# Previously every binding reused ``A``, so this dict was clobbered and the
# final _LazyModule call referenced an undefined ``_import_structure``.
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 46 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Submodule -> public-symbol map consumed by _LazyModule; the torch-only
# model entries are appended when torch is installed. Previously every
# binding reused one scrambled name, so the list clobbered this dict and the
# final _LazyModule call referenced an undefined ``_import_structure``.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
# ``pytestmark`` is the magic module-level name pytest reads to apply a mark
# to every test in the module; binding the marker to any other name (as the
# previous scrambled code did) silently drops the integration mark.
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """Integration tests for Dataset search indexes (FAISS + Elasticsearch).

    Restored from a scrambled version in which all methods shared one name
    (so only the last was defined), a lambda declared duplicate parameter
    names (a SyntaxError), and tuple results were bound to a single name
    while being read as ``examples``.
    """

    def _create_dummy_dataset(self):
        # 30 rows named my_name-train_0 ... my_name-train_29
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        # row i gets the vector i * ones(5); inner product then ranks row 29 first
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        # querying a dropped/unknown index must raise MissingIndex
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class a ( a__ ):
    """Unit tests for the low-level FaissIndex wrapper.

    NOTE(review): method names were mangled to ``UpperCamelCase__`` by the
    obfuscation, so unittest will not collect them as tests; restoring
    ``test_*`` names requires a coordinated rename.
    """

    def UpperCamelCase__ ( self ):
        """Flat inner-product index: add vectors, then single and batched queries."""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query: one-hot on dimension 1 must hit vector 1
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries: reversed identity maps to indices 4..0
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def UpperCamelCase__ ( self ):
        """String-factory construction; combining it with custom_index is an error."""
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def UpperCamelCase__ ( self ):
        """Wrap a pre-built faiss index object."""
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def UpperCamelCase__ ( self ):
        """save/load round trip through a temporary file."""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def _SCREAMING_SNAKE_CASE (mockfs):
    """FaissIndex serialization through an fsspec mock filesystem.

    The parameter must be named ``mockfs`` so pytest injects the fixture of
    that name; the obfuscated parameter name left ``mockfs`` undefined in the
    body (NameError).
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class a ( a__ ):
    """Unit tests for ElasticSearchIndex with the ES client fully mocked.

    NOTE(review): method name left as the mangled ``UpperCamelCase__`` (not
    collected by unittest); mock wiring and local names restored.
    """

    def UpperCamelCase__ ( self ):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 4 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__snake_case :Dict = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input():
    """Prompt for a compute environment and collect the matching configuration.

    Returns the config object produced by the SageMaker or cluster flow.
    """
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


__snake_case = get_user_input  # keep the obfuscated name bound for backward compatibility
def config_command_parser(subparsers=None):
    """Build the argparse parser for the `accelerate config` command.

    NOTE(review): the module-level description constant was shadowed by the
    obfuscated function names, so the text is duplicated locally here.
    """
    description = (
        "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your "
        "training system. Should always be ran first on your machine"
    )
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        # NOTE(review): upstream wires the subcommand to the config entry point;
        # confirm config_command is defined at module level in this file.
        parser.set_defaults(func=config_command)
    return parser


__snake_case = config_command_parser  # keep the obfuscated name bound for backward compatibility
def config_command(args):
    """Run the interactive config flow and save the result to disk.

    Saves to ``args.config_file`` when given, otherwise to the default YAML
    location inside the accelerate cache directory.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    # The file extension decides the serialization format.
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


__snake_case = config_command  # keep the obfuscated name bound for backward compatibility
def main():
    """CLI entry point: parse arguments and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


__snake_case = main  # keep the obfuscated name bound for backward compatibility


if __name__ == "__main__":
    main()
| 60 |
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated alphabetic characters removed.

    Spaces are always kept (even repeated ones); other non-alphabetic
    characters are dropped.
    """
    key_no_dups = ""
    for ch in key:
        # Keep spaces, and keep a letter only on its first occurrence.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


__snake_case = remove_duplicates  # keep the obfuscated name bound for backward compatibility
def create_cipher_map(key: str) -> dict:
    """Build a substitution-cipher mapping from plain letters to cipher letters.

    The de-duplicated, upper-cased key fills the first positions of the
    cipher alphabet; remaining letters follow alphabetically, skipping any
    letter already used by the key.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


__snake_case = create_cipher_map  # keep the obfuscated name bound for backward compatibility
def encipher(message: str, cipher_map: dict) -> str:
    """Encode *message* (upper-cased) through *cipher_map*; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


__snake_case = encipher  # keep the obfuscated name bound for backward compatibility
def decipher(message: str, cipher_map: dict) -> str:
    """Decode *message* by inverting *cipher_map*; unmapped characters pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


__snake_case = decipher  # keep the obfuscated name bound for backward compatibility
def main() -> None:
    """Interactive driver: read message/key/mode and print the (de)ciphered text."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


__snake_case = main  # keep the obfuscated name bound for backward compatibility


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 60 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case :List[str] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case :List[str] =' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for utils/check_copies.py.

    NOTE(review): obfuscation collapsed method, parameter, and local names
    (one method even had duplicate parameter names, a SyntaxError). The
    unittest hook and test method names are restored so the fixtures run and
    the tests are collected; call sites inside the class already referenced
    these names.
    """

    def setUp(self):
        """Create a scratch transformers tree holding a copy of modeling_bert.py."""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        # Point check_copies at the scratch tree instead of the real repo.
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        """Restore check_copies' default path and drop the scratch tree."""
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write a candidate class and assert is_copy_consistent agrees (or fixes it)."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): the black target-version token was mangled ("PYaa");
        # upstream uses PY35 -- confirm.
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formatter that converts Arrow data to PyTorch tensors.

    NOTE(review): obfuscation renamed every method to ``A__`` and collapsed
    locals; the ``format_row``/``format_column``/``format_batch`` names and
    the internal helpers are restored to match the TensorFormatter contract
    (the class itself referenced them, e.g. ``self._recursive_tensorize``).
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded to every torch.tensor() call.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/dtype tensors into one tensor; otherwise return as-is."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a torch tensor, choosing a default dtype."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # User-supplied kwargs override the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 94 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
UpperCamelCase__ = colors  # keep the obfuscated name's final binding for backward compatibility
def dutch_national_flag_sort(sequence: list, colors: tuple = (0, 1, 2)) -> list:
    """Sort *sequence*, whose elements are all in *colors*, in place using the
    three-way Dutch national flag partition, and return it.

    ``colors`` defaults to the module-level ``(red, white, blue)`` values and
    is exposed as a parameter so other color triples can be sorted too
    (backward compatible: existing one-argument calls are unchanged).

    Raises ValueError when an element is not one of the three colors.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # first color: swap down to the growing low region
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # third color: swap up to the shrinking high region
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


__UpperCAmelCase = dutch_national_flag_sort  # keep the obfuscated name bound for backward compatibility
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of colors, sort it, and show the result.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 685 |
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of *number*.

    Returns 0 for an input of 0 (no set bits).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


__UpperCAmelCase = get_highest_set_bit_position  # keep the obfuscated name bound for backward compatibility


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 685 | 1 |
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """Return the sum of Euler's totient phi(i) for 2 <= i <= limit
    (Project Euler 72: number of reduced proper fractions with denominator <= limit).

    Uses a totient sieve: phi[i] starts at i - 1; for each prime p every
    multiple j has phi[j] reduced by phi[j] // p.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: phi[i] was never reduced
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


__lowerCAmelCase = solution  # keep the obfuscated name bound for backward compatibility


if __name__ == "__main__":
    print(solution())
| 546 | '''simple docstring'''
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Count ordered combinations (with repetition) of elements of *array*
    that sum to *target*, by plain recursion.

    ``n`` (= len(array)) is kept for interface compatibility; it is unused.
    The obfuscated original declared three parameters with the same name,
    which is a SyntaxError; distinct names are restored.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


__lowerCAmelCase = combination_sum_iv  # keep the obfuscated name bound for backward compatibility
def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Same count as combination_sum_iv, but memoized top-down in dp_array.

    ``n`` is kept for interface compatibility; it is unused.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


__lowerCAmelCase = combination_sum_iv_dp_array  # keep the obfuscated name bound for backward compatibility
def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Same count, bottom-up: dp_array[i] holds the number of ways to reach sum i.

    ``n`` must equal len(array) (it bounds the inner loop).
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make zero: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


__lowerCAmelCase = combination_sum_iv_bottom_up  # keep the obfuscated name bound for backward compatibility
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The obfuscated original assigned n, target, and array to one name and
    # then referenced the three distinct names; restored.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 546 | 1 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): the "{filename}" placeholder had been clobbered by a
# "(unknown)" metadata artifact in the obfuscated source; restored to match
# huggingface_hub's HUGGINGFACE_CO_URL_TEMPLATE.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"

CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()

UpperCAmelCase__ = CI_HUB_TOKEN_PATH  # keep the obfuscated name's final binding for backward compatibility
@pytest.fixture
def ci_hfh_hf_co_url_template(monkeypatch):
    """Point huggingface_hub's download URL template at the CI hub.

    NOTE(review): fixture and parameter names were collapsed by obfuscation
    (pytest resolves both by name); restored to the upstream names -- confirm
    against the test modules that request them.
    """
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point datasets' hub endpoints at the CI hub."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hfh_token_path(monkeypatch):
    """Redirect HfFolder's token path to the CI token file."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hfh_token_path):
    """Install the CI user token for the duration of a test, then remove it."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    """Session-wide HfApi client against the CI hub."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    """Log the CI user in for the whole session, restoring any previous token."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo on the CI hub.

    NOTE(review): the token argument was lost in obfuscation; upstream passes
    CI_HUB_USER_TOKEN -- confirm.
    """

    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    """Context-manager factory that yields a repo_id and deletes the repo afterwards."""

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( UpperCamelCase__ : HfApi , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__lowercase = f'''repo_txt_data-{int(time.time() * 10E3 )}'''
__lowercase = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" , private=UpperCamelCase__ )
hf_api.upload_file(
token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo="""data/text_data.txt""" , repo_id=UpperCamelCase__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( UpperCamelCase__ : HfApi , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__lowercase = f'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
__lowercase = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" , private=UpperCamelCase__ )
hf_api.upload_file(
token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=UpperCamelCase__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    """Session-scoped fixture: create a private Hub dataset repo containing a zipped image.

    Yields the repo_id and deletes the repo on teardown (best effort).
    NOTE(review): parameter names reconstructed from usage; confirm against conftest.
    """
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10E3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Function-scoped alias over the zipped-image session repo fixture.

    NOTE(review): the two config fixture names are reconstructed — confirm in conftest.
    """
    return hf_private_dataset_repo_zipped_img_data_
| 712 |
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def lowerCAmelCase_ ( UpperCamelCase__ : Dict ):
"""simple docstring"""
__lowercase = min(UpperCamelCase__ ) # min() finds the minimum value
__lowercase = max(UpperCamelCase__ ) # max() finds the maximum value
__lowercase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__lowercase = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__lowercase = 0
for count in range(UpperCamelCase__ ):
while holes[count] > 0:
holes[count] -= 1
__lowercase = count + min_val
i += 1
def main() -> None:
    """Sort a small demo list and print the result."""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # str.join requires strings; the original passed ints straight to join,
    # which raises TypeError — convert each element first.
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
| 442 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

# Lazily-populated mapping from device string identifiers to
# `jaxlib.xla_extension.Device` objects. Kept as a module-level global because
# Device objects are not serializable with `pickle`/`dill`.
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow-extracted data into JAX arrays.

    Arrays are created on the device named by ``device`` (a string identifier,
    e.g. ``"cpu:0"``); extra ``jnp_array_kwargs`` are forwarded to ``jnp.array``.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        """Return a mapping of string identifiers to available jax devices."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one batched array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert a leaf value (number, ndarray, PIL image) to a jax array on self.device."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize one node, converting foreign tensors and recursing into containers."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        """Apply ``_recursive_tensorize`` over a (possibly nested) mapping structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        """Extract, decode and tensorize a single row from an Arrow table."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        """Extract, decode, tensorize and batch a single column from an Arrow table."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        """Extract, decode and tensorize a whole batch, consolidating each column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 481 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop entire residual paths per sample (stochastic depth).

    Args:
        input: tensor of shape `(batch, ...)`.
        drop_prob: probability of zeroing a sample's path.
        training: apply dropping only in training mode.

    Returns:
        The input unchanged when `drop_prob == 0` or not training; otherwise the
        input rescaled by `1 / keep_prob` with a per-sample binary mask applied.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # work with diff dim tensors, not just 2D ConvNets
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob  # probability of dropping the whole path

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # shown in the module's repr, e.g. "PoolFormerDropPath(p=0.1)"
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a strided convolution plus optional normalization."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either an int (square) or an iterable (height, width) for each geometry arg.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group; input shape `[batch, channels, *]`."""

    def __init__(self, num_channels, **kwargs):
        # num_groups is fixed to 1, making this equivalent to LayerNorm over channels.
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """Token mixing by average pooling; returns the pooled residual (pool(x) - x)."""

    def __init__(self, pool_size):
        super().__init__()
        # stride=1 + symmetric padding keeps the spatial resolution unchanged;
        # count_include_pad=False so border averages ignore the zero padding.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input makes this a residual "delta" that the caller adds back.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """Channel MLP implemented with two 1x1 convolutions, activation and drop-path."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)

        # NOTE(review): the file imports the activation table as `ACTaFN`
        # (mangled `ACT2FN`); fix the import and this reference together.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling token-mixer + channel MLP, each with a residual."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scales applied to each residual branch.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """Stack of patch-embedding stages, each followed by a sequence of PoolFormer blocks."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: drop-path rate grows linearly with depth
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings (one per encoder stage)
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight initialization and pretrained-model loading for PoolFormer."""

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize weights: truncated-free normal for linear/conv, unit LayerNorm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the encoder only.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
_a = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_a = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    """Encoder-only PoolFormer model returning the last hidden state."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # NOTE(review): `self.embeddings` is not set in this class; presumably
        # resolved on a subclass/attribute elsewhere — confirm before relying on it.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    """Simple dense projection applied to the final hidden states."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    """PoolFormer with a GroupNorm + linear classification head over pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        """Run classification; when `labels` is given, also compute the appropriate loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Global average pool over the spatial dims, then classify.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 481 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Mapping from model type to the name of its feature extractor class.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazy config-class -> feature-extractor-class mapping built from the names above.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its class name.

    Looks first in the per-model-type modules, then in extractors registered at
    runtime, then falls back to the main `transformers` namespace (which exposes
    dummy objects when a dependency is missing). Returns None if not found.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration dict from a local path or the Hub.

    Returns:
        dict: the parsed feature extractor config, or an empty dict if no
        config file could be located (caller falls back to the model config).
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory class instantiating the right feature extractor via `from_pretrained`.

    Cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching the checkpoint.

        Resolution order: `feature_extractor_type` in the extractor config,
        then an `auto_map` entry (remote code), then the model config, and
        finally the static FEATURE_EXTRACTOR_MAPPING keyed by config class.

        Raises:
            ValueError: if no matching feature extractor can be determined.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 716 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    >>> solution(13195)
    29

    Raises:
        TypeError: if ``n`` is not an int and cannot be cast to one.
        ValueError: if ``n`` is not >= 1.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next factor of n
        while n % i != 0:
            i += 1
        ans = i
        # divide the factor out completely so ans ends up at the largest prime
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 166 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV1-specific attributes."""

    def create_and_test_config_common_properties(self):
        # Build a config from the tester's inputs and verify the
        # MobileNetV1-specific fields exist.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds small MobileNetV1 configs and random inputs, and runs shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The effective hidden size scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config plus random pixel values and (optional) labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a MobileNetV1 config from the tester's attributes."""
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Check the base model's output feature-map shape."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Check the classification head's logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Model tests for MobileNetV1. Several common tests are overridden/skipped since
    MobileNetV1 does not use input_ids, inputs_embeds, attention_mask or seq_length.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            # MobileNetV1 exposes 26 intermediate feature maps.
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained google/mobilenet_v1_1.0_224 checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 15 |
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """
    Sort a list of numbers with bucket sort: one bucket per integer offset from
    the minimum value, each bucket sorted individually.

    Args:
        my_list: List of comparable numbers (may be empty).

    Returns:
        A new sorted list.

    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]

    # Distribute each value into its bucket by integer offset from the minimum.
    for i in my_list:
        buckets[int(i - min_value)].append(i)

    # Concatenate the individually sorted buckets.
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 591 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
def get_detr_config(model_name):
    """
    Build a DetrConfig (with ResNet backbone) for the given checkpoint name.

    Args:
        model_name: Checkpoint name; must contain "resnet-50" or "resnet-101",
            and may contain "panoptic".

    Returns:
        Tuple of (config, is_panoptic).

    Raises:
        ValueError: If the model name names no supported backbone.
    """
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """
    Build the list of (original_key, hf_key) pairs mapping the Facebook DETR
    state dict to the Hugging Face layout.

    Args:
        config: DetrConfig; only ``backbone_config.depths`` and
            ``encoder_layers`` are read.

    Returns:
        List of (source_key, destination_key) tuples.
    """
    rename_keys = []
    # Batch-norm statistics always come in these four flavours.
    bn_stats = ("weight", "bias", "running_mean", "running_var")

    # stem
    rename_keys.append(
        ("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")
    )
    for stat in bn_stats:
        rename_keys.append(
            (f"backbone.0.body.bn1.{stat}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{stat}")
        )

    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dest = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            # shortcut (only the first layer of each stage downsamples)
            if layer_idx == 0:
                rename_keys.append((f"{src}.downsample.0.weight", f"{dest}.shortcut.convolution.weight"))
                for stat in bn_stats:
                    rename_keys.append((f"{src}.downsample.1.{stat}", f"{dest}.shortcut.normalization.{stat}"))
            # 3 convs per bottleneck layer, each followed by a batch norm
            for i in range(3):
                rename_keys.append((f"{src}.conv{i + 1}.weight", f"{dest}.layer.{i}.convolution.weight"))
                for stat in bn_stats:
                    rename_keys.append((f"{src}.bn{i + 1}.{stat}", f"{dest}.layer.{i}.normalization.{stat}"))

    for i in range(config.encoder_layers):
        enc_src = f"transformer.encoder.layers.{i}"
        enc_dest = f"encoder.layers.{i}"
        dec_src = f"transformer.decoder.layers.{i}"
        dec_dest = f"decoder.layers.{i}"
        for p in ("weight", "bias"):
            # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
            rename_keys.append((f"{enc_src}.self_attn.out_proj.{p}", f"{enc_dest}.self_attn.out_proj.{p}"))
            rename_keys.append((f"{enc_src}.linear1.{p}", f"{enc_dest}.fc1.{p}"))
            rename_keys.append((f"{enc_src}.linear2.{p}", f"{enc_dest}.fc2.{p}"))
            rename_keys.append((f"{enc_src}.norm1.{p}", f"{enc_dest}.self_attn_layer_norm.{p}"))
            rename_keys.append((f"{enc_src}.norm2.{p}", f"{enc_dest}.final_layer_norm.{p}"))
            # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
            rename_keys.append((f"{dec_src}.self_attn.out_proj.{p}", f"{dec_dest}.self_attn.out_proj.{p}"))
            rename_keys.append((f"{dec_src}.multihead_attn.out_proj.{p}", f"{dec_dest}.encoder_attn.out_proj.{p}"))
            rename_keys.append((f"{dec_src}.linear1.{p}", f"{dec_dest}.fc1.{p}"))
            rename_keys.append((f"{dec_src}.linear2.{p}", f"{dec_dest}.fc2.{p}"))
            rename_keys.append((f"{dec_src}.norm1.{p}", f"{dec_dest}.self_attn_layer_norm.{p}"))
            rename_keys.append((f"{dec_src}.norm2.{p}", f"{dec_dest}.encoder_attn_layer_norm.{p}"))
            rename_keys.append((f"{dec_src}.norm3.{p}", f"{dec_dest}.final_layer_norm.{p}"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys
def rename_key(state_dict, old, new):
    """Move the value stored under ``old`` to ``new`` in ``state_dict`` (in place)."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """
    Split each fused attention ``in_proj`` weight/bias into separate q/k/v
    projection entries, modifying ``state_dict`` in place.

    Args:
        state_dict: Original DETR state dict (fused projections are 768 rows:
            256 each for q, k, v).
        is_panoptic: Whether the checkpoint keys carry the "detr." prefix.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original DETR weights into the Hugging Face layout,
    verify the outputs match, and optionally save / push the converted model.

    Args:
        model_name: One of "detr-resnet-50" / "detr-resnet-101" (optionally panoptic).
        pytorch_dump_folder_path: Where to save the converted model, or None.
        push_to_hub: Whether to upload the converted model to the hub.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
a_ : Optional[int] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 704 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """The (deprecated) dataset_name must still be present in asdict() output."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 532 | 0 |
from __future__ import annotations
from typing import Any
class Matrix:
    """
    A dense matrix backed by a list of lists, supporting +, -, * (scalar and
    matrix), transpose and the Sherman-Morrison inverse update.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a row x column matrix filled with default_value."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: width of the widest element, to align columns.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True if loc is a valid (row, column) index pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add elementwise
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """
        Given self = A^(-1), return (A + uv^T)^(-1) via the Sherman-Morrison
        formula, or None when the updated matrix is not invertible.
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a 3x3 identity matrix."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 678 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """
    Compute the Gamma function of ``num`` by numerically integrating
    x^(num-1) * e^(-x) over [0, inf).

    Args:
        num: Argument of the Gamma function; must be positive.

    Returns:
        Approximation of Gamma(num).

    Raises:
        ValueError: If ``num`` is not positive.
    """
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """Integrand of the Gamma function: x^(z-1) * e^(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 570 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Optional[int] = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """
    Configuration class for Marian translation models. Stores the architecture
    hyperparameters; defaults reproduce Helsinki-NLP/opus-mt-en-de.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # The decoder vocab defaults to the shared (encoder) vocab size.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class UpperCamelCase__( lowerCAmelCase ):
    """ONNX export configuration for Marian: axis specs and dummy inputs.

    NOTE(review): this block is machine-garbled and cannot run as-is — every
    method shares the placeholder name ``a__`` (later defs shadow earlier
    ones, so the ``inputs``/``outputs`` properties are lost), results are
    repeatedly bound to the single name ``UpperCAmelCase`` instead of distinct
    locals / tuple unpacks (so names like ``common_inputs``, ``batch``,
    ``seqlen`` are undefined where read), several signatures repeat the
    parameter name ``lowerCAmelCase`` (a SyntaxError), and call arguments were
    collapsed to the undefined placeholder ``UpperCamelCase_``. The code is
    kept byte-identical; comments describe the apparent intent only.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def a__( self : List[str] )-> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the ONNX graph inputs for the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # With cached key/values the decoder consumes one new token.
                UpperCAmelCase = {0: 'batch'}
                UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
                UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCAmelCase = self.num_layers
                for i in range(UpperCamelCase_ ):
                    UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
                    UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def a__( self : str )-> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the ONNX graph outputs for the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super().outputs
        else:
            UpperCAmelCase = super(UpperCamelCase_ , self ).outputs
            if self.use_past:
                UpperCAmelCase = self.num_layers
                for i in range(UpperCamelCase_ ):
                    UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
                    UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def a__( self : List[str] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and past key/values) for export."""
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # Generate decoder inputs
        UpperCAmelCase = seq_length if not self.use_past else 1
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        UpperCAmelCase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        UpperCAmelCase = dict(**UpperCamelCase_ , **UpperCamelCase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase = common_inputs['input_ids'].shape
            UpperCAmelCase = common_inputs['decoder_input_ids'].shape[1]
            UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = decoder_seq_length + 3
            UpperCAmelCase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask to also cover the appended past positions.
            UpperCAmelCase = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
            UpperCAmelCase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCAmelCase = self.num_layers
            UpperCAmelCase = min(UpperCamelCase_ , UpperCamelCase_ )
            UpperCAmelCase = max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
            UpperCAmelCase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(UpperCamelCase_ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                    ) )
            # TODO: test this.
            UpperCAmelCase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
                common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
        return common_inputs

    def a__( self : Dict , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
        """Build dummy decoder-only (causal-lm) inputs, optionally with past."""
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            UpperCAmelCase = seqlen + 2
            UpperCAmelCase = self.num_layers
            UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = common_inputs['attention_mask'].dtype
            UpperCAmelCase = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
            UpperCAmelCase = [
                (torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
            ]
        return common_inputs

    def a__( self : int , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
        """Tokenize a fixed dummy sentence batch for the requested sizes."""
        UpperCAmelCase = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
        UpperCAmelCase = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCAmelCase = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
        return common_inputs

    def a__( self : Optional[int] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        else:
            UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        return common_inputs

    def a__( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str )-> Tuple:
        """Flatten past_key_values entries, seq2seq-style or decoder-only."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        else:
            UpperCAmelCase = super(UpperCamelCase_ , self )._flatten_past_key_values_(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

    @property
    def a__( self : List[Any] )-> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-4
| 704 |
"""Lazy import setup for the GPT-BigCode model package."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: maps submodule name -> public names it defines.
# Fix: the original bound this dict (and the torch-only model list below) to
# the garbled name ``_lowercase``, so the ``_import_structure`` consumed by
# ``_LazyModule`` at the bottom was never defined (NameError at import time).
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    # Fix: the original assigned the proxy to the garbled ``_lowercase``
    # (leaving the otherwise-unused ``import sys`` dangling) instead of
    # installing it in sys.modules, which is the _LazyModule pattern.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase =list[list[float | int]]
def _A ( _a : Matrix , _a : Matrix ):
"""simple docstring"""
A = len(__UpperCamelCase )
A = [[0 for _ in range(size + 1 )] for _ in range(__UpperCamelCase )]
A = 4_2
A = 4_2
A = 4_2
A = 4_2
A = 4_2
A = 4_2
for row in range(__UpperCamelCase ):
for col in range(__UpperCamelCase ):
A = matrix[row][col]
A = vector[row][0]
A = 0
A = 0
while row < size and col < size:
# pivoting
A = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCamelCase , __UpperCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A , A = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCamelCase ):
A = augmented[rowa][col] / augmented[row][col]
A = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCamelCase ):
for row in range(__UpperCamelCase ):
A = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(__UpperCamelCase )
]
def _A ( y_points : list[int] ):
    """Build an integer polynomial interpolating ``y_points`` at x = 1, 2, ...

    Constructs the Vandermonde system for the points (1, y_points[0]),
    (2, y_points[1]), ..., solves it with the Gaussian-elimination ``solve``
    and returns a function that evaluates the fitted polynomial.

    Fix: the original body referenced the undefined placeholder
    ``__UpperCamelCase`` instead of its parameter and locals; reconstructed.

    NOTE(review): this calls a module-level ``solve``, which the garbled file
    no longer binds under that name — confirm the sibling solver's name.
    """
    size = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            # Row for x = x_val + 1: powers x**(size-1), ..., x**0.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )

    def interpolated_func(var : int ) -> int:
        """Evaluate the interpolating polynomial at ``var``."""
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )

    return interpolated_func
def _A ( _a : int ):
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _A ( func : Callable[[int], int] = question_function , order : int = 1_0 ) -> int:
    """Sum the first incorrect terms (FITs) of the bad optimum polynomials.

    For each k in 1..order, fit a degree-(k-1) polynomial through the first k
    values of ``func`` and add the first value where that polynomial diverges
    from ``func`` (Project Euler problem 101).

    Fix: the original declared both parameters with the same placeholder name
    ``_a`` (a SyntaxError); the names the body reads are restored.

    NOTE(review): the default and body reference the module-level
    ``question_function`` and ``interpolate``, which the garbled file no
    longer binds under those names — confirm the siblings' intended names.
    """
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # Advance to the first argument where the fitted polynomial disagrees.
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not bound in this garbled module (the
    # solver above was renamed to a placeholder), so running this script
    # raises NameError — confirm the solver's intended name.
    print(f"""{solution() = }""")
| 617 |
def SCREAMING_SNAKE_CASE ( discount_rate : float , cash_flows : list[float] ) -> float:
    """Compute the net present value (NPV) of a series of cash flows.

    NPV = sum(cash_flow_i / (1 + discount_rate)**i), where the first cash flow
    occurs at period 0 (undiscounted). The result is rounded to 2 decimals.

    Fix: the original declared both parameters with the same placeholder name
    ``__UpperCamelCase`` (a SyntaxError) while the body read ``discount_rate``
    and ``cash_flows``; the intended names are restored.

    :param discount_rate: per-period discount rate; must be non-negative.
    :param cash_flows: one cash flow per period; must be non-empty.
    :return: net present value rounded to 2 decimal places.
    :raises ValueError: on a negative rate or an empty cash-flow list.
    """
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 144 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Helper that builds small ConvNextV2 configs and inputs for the tests.

    Fixes applied to this machine-garbled block:
    - method signatures repeated the placeholder ``UpperCamelCase__`` as the
      name of every parameter (a SyntaxError); parameter names are restored
      from the values the bodies assign/read;
    - locals were collapsed to ``A_`` and call arguments to the undefined
      placeholder ``a_``; restored to the names the bodies reference
      (``config``, ``pixel_values``, ``labels``, ``torch_device``);
    - method names were all collapsed to ``snake_case_``; restored to the
      names the consuming test class actually calls
      (``prepare_config_and_inputs``, ``prepare_config_and_inputs_with_labels``,
      ``create_and_check_for_image_classification``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random test tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ConvNextV2 config from the tester's hyper-parameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head's logits shape."""
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check backbone feature maps/channels, with and without out_features."""
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        """Return (config, inputs_dict) including labels, for training checks."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


# Backward-compatible binding: the test class below instantiates the tester
# under this name, which the garbled file never defined.
ConvNextVaModelTester = A__
@require_torch
class A__ ( __a , __a , unittest.TestCase ):
    """Common model/pipeline tests for ConvNextV2.

    NOTE(review): this block is machine-garbled and not runnable as-is — the
    class attributes all share the name ``lowercase`` (each assignment
    clobbers the previous one), every method is named ``snake_case_`` (later
    defs shadow earlier ones), tuple unpacks were collapsed into single ``A_``
    bindings (so names like ``model``, ``loss``, ``inputs_dict`` are undefined
    where read), and call arguments were replaced with the undefined
    placeholder ``a_``. Code kept byte-identical; comments describe intent.
    """

    lowercase = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def snake_case_ ( self ) -> List[str]:
        """Set up the model tester and a ConfigTester for the config class."""
        A_ = ConvNextVaModelTester(self )
        A_ = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )

    def snake_case_ ( self ) -> List[Any]:
        """Run the standard configuration round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case_ ( self ) -> str:
        """Config common-properties hook (intentionally a no-op here)."""
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def snake_case_ ( self ) -> Any:
        """Skipped: model has no inputs_embeds."""
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def snake_case_ ( self ) -> str:
        """Skipped: model has no input/output embeddings."""
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def snake_case_ ( self ) -> str:
        """Skipped: model does not support feed-forward chunking."""
        pass

    def snake_case_ ( self ) -> Optional[Any]:
        """Check a training forward/backward pass runs for trainable classes."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            A_ = self.model_tester.prepare_config_and_inputs_with_labels()
            A_ = True
            if model_class.__name__ in [
                *get_values(a_ ),
                *get_values(a_ ),
            ]:
                continue
            A_ = model_class(a_ )
            model.to(a_ )
            model.train()
            A_ = self._prepare_for_class(a_ , a_ , return_labels=a_ )
            A_ = model(**a_ ).loss
            loss.backward()

    def snake_case_ ( self ) -> Optional[int]:
        """Same training check, with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            A_ = self.model_tester.prepare_config_and_inputs_with_labels()
            A_ = False
            A_ = True
            if (
                model_class.__name__
                in [*get_values(a_ ), *get_values(a_ )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            A_ = model_class(a_ )
            model.to(a_ )
            model.gradient_checkpointing_enable()
            model.train()
            A_ = self._prepare_for_class(a_ , a_ , return_labels=a_ )
            A_ = model(**a_ ).loss
            loss.backward()

    def snake_case_ ( self ) -> Tuple:
        """Verify each model's forward signature starts with `pixel_values`."""
        A_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ = model_class(a_ )
            A_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_ = [*signature.parameters.keys()]
            A_ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , a_ )

    def snake_case_ ( self ) -> Tuple:
        """Exercise the base-model shape check from the tester."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    def snake_case_ ( self ) -> Tuple:
        """Check hidden-state outputs: count per stage and spatial shape."""
        def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
            A_ = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                A_ = model(**self._prepare_for_class(a_ , a_ ) )
            A_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            A_ = self.model_tester.num_stages
            self.assertEqual(len(a_ ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        A_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ = True
            check_hidden_states_output(a_ , a_ , a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A_ = True
            check_hidden_states_output(a_ , a_ , a_ )

    def snake_case_ ( self ) -> List[str]:
        """Exercise the image-classification head check from the tester."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )

    @slow
    def snake_case_ ( self ) -> Optional[Any]:
        """Smoke-test loading the first published pretrained checkpoint."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ = ConvNextVaModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
def UpperCAmelCase__ ( ) -> "Image.Image":
    """Load the standard COCO cats fixture image used by the integration test.

    Fix: the original bound the opened image to the garbled local ``A_`` but
    returned the undefined name ``image`` (NameError); the local is renamed to
    match the return statement.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


# Backward-compatible binding: the integration test below calls the helper
# under this name, which the garbled file never defined.
prepare_img = UpperCAmelCase__
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
    """Slow integration test: pretrained ConvNextV2 classification logits.

    NOTE(review): machine-garbled — locals were collapsed to ``A_`` and call
    arguments to the undefined placeholder ``a_`` (evidently standing for
    ``torch_device``, ``image``, ``inputs`` etc. in different calls — confirm
    against the original). Code kept byte-identical; comments describe intent.
    """

    @cached_property
    def snake_case_ ( self ) -> Any:
        """Image processor for the reference checkpoint, if vision is available."""
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None

    @slow
    def snake_case_ ( self ) -> List[Any]:
        """Run one forward pass and compare a logits slice to reference values."""
        A_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
        A_ = self.default_image_processor
        A_ = prepare_img()
        A_ = preprocessor(images=a_ , return_tensors="""pt""" ).to(a_ )
        # forward pass
        with torch.no_grad():
            A_ = model(**a_ )
        # verify the logits
        A_ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , a_ )
        A_ = torch.tensor([0.9996, 0.1966, -0.4386] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
    """Scheduler tests for IPNDMScheduler.

    NOTE(review): this block is machine-garbled and not runnable as-is — both
    class attributes share the name ``lowercase`` (the second clobbers the
    first), all methods are named ``snake_case_`` (later defs shadow earlier
    ones), locals are collapsed to ``A_`` (so names like ``config``,
    ``scheduler``, ``sample``, ``output`` are undefined where read), call
    arguments were replaced with the placeholder ``UpperCamelCase__``, and two
    method signatures reuse that placeholder for both a positional parameter
    and ``**kwargs`` (a SyntaxError). Code kept byte-identical; comments
    describe the apparent intent only.
    """

    lowercase = (IPNDMScheduler,)
    lowercase = (("num_inference_steps", 50),)

    def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
        """Return a default scheduler config updated with the given overrides."""
        A_ = {"""num_train_timesteps""": 1000}
        config.update(**UpperCamelCase__ )
        return config

    def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
        """Check save_config/from_pretrained round-trips with config overrides."""
        A_ = dict(self.forward_default_kwargs )
        A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
        A_ = self.dummy_sample
        A_ = 0.1 * sample
        A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            A_ = self.get_scheduler_config(**UpperCamelCase__ )
            A_ = scheduler_class(**UpperCamelCase__ )
            scheduler.set_timesteps(UpperCamelCase__ )
            # copy over dummy past residuals
            A_ = dummy_past_residuals[:]
            if time_step is None:
                A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__ )
                A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
                new_scheduler.set_timesteps(UpperCamelCase__ )
                # copy over dummy past residuals
                A_ = dummy_past_residuals[:]
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def snake_case_ ( self ) -> Optional[int]:
        """Intentionally empty: the round-trip is covered by the check above."""
        pass

    def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
        """Round-trip check where timesteps are set after reloading."""
        A_ = dict(self.forward_default_kwargs )
        A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
        A_ = self.dummy_sample
        A_ = 0.1 * sample
        A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            A_ = self.get_scheduler_config()
            A_ = scheduler_class(**UpperCamelCase__ )
            scheduler.set_timesteps(UpperCamelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            A_ = dummy_past_residuals[:]
            if time_step is None:
                A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__ )
                A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                A_ = dummy_past_residuals[:]
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
        """Run a full denoising loop (twice over the timesteps) and return the sample."""
        A_ = self.scheduler_classes[0]
        A_ = self.get_scheduler_config(**UpperCamelCase__ )
        A_ = scheduler_class(**UpperCamelCase__ )
        A_ = 10
        A_ = self.dummy_model()
        A_ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            A_ = model(UpperCamelCase__ , UpperCamelCase__ )
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            A_ = model(UpperCamelCase__ , UpperCamelCase__ )
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
        return sample

    def snake_case_ ( self ) -> Union[str, Any]:
        """Check step() output shapes at two consecutive timesteps."""
        A_ = dict(self.forward_default_kwargs )
        A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
        for scheduler_class in self.scheduler_classes:
            A_ = self.get_scheduler_config()
            A_ = scheduler_class(**UpperCamelCase__ )
            A_ = self.dummy_sample
            A_ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
                scheduler.set_timesteps(UpperCamelCase__ )
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
                A_ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            A_ = dummy_past_residuals[:]
            A_ = scheduler.timesteps[5]
            A_ = scheduler.timesteps[6]
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def snake_case_ ( self ) -> Any:
        """Vary num_train_timesteps through the round-trip check."""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )

    def snake_case_ ( self ) -> Any:
        """Vary num_inference_steps through the forward round-trip check."""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )

    def snake_case_ ( self ) -> Dict:
        """Check the mean magnitude of the full-loop sample against a reference."""
        A_ = self.full_loop()
        A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.