"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_a = NewType('DataClass', Any)
_a = NewType('DataClassType', Any)
def __a ( __lowerCamelCase ):
if isinstance(__lowerCamelCase, __lowerCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = {str(__lowerCamelCase ): choice for choice in choices}
return lambda __lowerCamelCase : str_to_choice.get(__lowerCamelCase, __lowerCamelCase )
def __a ( *,
__lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = dataclasses.MISSING, __lowerCamelCase = dataclasses.MISSING, __lowerCamelCase = None, **__lowerCamelCase, ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCAmelCase_ : Optional[Any] = {}
if aliases is not None:
UpperCAmelCase_ : List[str] = aliases
if help is not None:
UpperCAmelCase_ : List[Any] = help
return dataclasses.field(metadata=__lowerCamelCase, default=__lowerCamelCase, default_factory=__lowerCamelCase, **__lowerCamelCase )
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Iterable[DataClassType]
def __init__( self , lowercase_ , **lowercase_ ):
"""simple docstring"""
# To make the default appear when using --help
if "formatter_class" not in kwargs:
UpperCAmelCase_ : int = ArgumentDefaultsHelpFormatter
super().__init__(**lowercase_ )
if dataclasses.is_dataclass(lowercase_ ):
UpperCAmelCase_ : Tuple = [dataclass_types]
UpperCAmelCase_ : Optional[Any] = list(lowercase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowercase_ )
@staticmethod
def UpperCamelCase__ ( lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = F"""--{field.name}"""
UpperCAmelCase_ : int = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowercase_ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("aliases" , [] )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Any = [aliases]
UpperCAmelCase_ : Optional[Any] = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(lowercase_ , "UnionType" ) and isinstance(lowercase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowercase_ ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F""" Problem encountered in field '{field.name}'.""" )
if type(lowercase_ ) not in field.type.__args__:
# filter `str` in Union
UpperCAmelCase_ : str = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCAmelCase_ : Dict = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCAmelCase_ : str = (
field.type.__args__[0] if isinstance(lowercase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCAmelCase_ : List[str] = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCAmelCase_ : List[Any] = {}
if origin_type is Literal or (isinstance(field.type , lowercase_ ) and issubclass(field.type , lowercase_ )):
if origin_type is Literal:
UpperCAmelCase_ : List[Any] = field.type.__args__
else:
UpperCAmelCase_ : Union[str, Any] = [x.value for x in field.type]
UpperCAmelCase_ : Optional[Any] = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
UpperCAmelCase_ : str = field.default
else:
UpperCAmelCase_ : str = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCAmelCase_ : int = copy(lowercase_ )
# Hack because type=bool in argparse does not behave as we want.
UpperCAmelCase_ : Union[str, Any] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCAmelCase_ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCAmelCase_ : Optional[int] = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCAmelCase_ : int = "?"
# This is the value that will get picked if we do --field_name (without value)
UpperCAmelCase_ : Tuple = True
elif isclass(lowercase_ ) and issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[str] = field.type.__args__[0]
UpperCAmelCase_ : str = "+"
if field.default_factory is not dataclasses.MISSING:
UpperCAmelCase_ : List[str] = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCAmelCase_ : Optional[Any] = True
else:
UpperCAmelCase_ : Tuple = field.type
if field.default is not dataclasses.MISSING:
UpperCAmelCase_ : Optional[int] = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCAmelCase_ : Any = field.default_factory()
else:
UpperCAmelCase_ : List[Any] = True
parser.add_argument(lowercase_ , *lowercase_ , **lowercase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCAmelCase_ : Any = False
parser.add_argument(F"""--no_{field.name}""" , action="store_false" , dest=field.name , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if hasattr(lowercase_ , "_argument_group_name" ):
UpperCAmelCase_ : int = self.add_argument_group(dtype._argument_group_name )
else:
UpperCAmelCase_ : int = self
try:
UpperCAmelCase_ : Dict[str, type] = get_type_hints(lowercase_ )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowercase_ ):
UpperCAmelCase_ : Dict = ".".join(map(lowercase_ , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(lowercase_ ):
if not field.init:
continue
UpperCAmelCase_ : Optional[Any] = type_hints[field.name]
self._parse_dataclass_field(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=None , lowercase_=False , lowercase_=True , lowercase_=None , lowercase_=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCAmelCase_ : int = []
if args_filename:
args_files.append(Path(lowercase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCAmelCase_ : str = ArgumentParser()
args_file_parser.add_argument(lowercase_ , type=lowercase_ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = args_file_parser.parse_known_args(args=lowercase_ )
UpperCAmelCase_ : List[str] = vars(lowercase_ ).get(args_file_flag.lstrip("-" ) , lowercase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowercase_ ) for p in cmd_args_file_paths] )
UpperCAmelCase_ : int = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCAmelCase_ : Optional[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.parse_known_args(args=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = []
for dtype in self.dataclass_types:
UpperCAmelCase_ : List[Any] = {f.name for f in dataclasses.fields(lowercase_ ) if f.init}
UpperCAmelCase_ : List[str] = {k: v for k, v in vars(lowercase_ ).items() if k in keys}
for k in keys:
delattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = dtype(**lowercase_ )
outputs.append(lowercase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowercase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = set(args.keys() )
UpperCAmelCase_ : Tuple = []
for dtype in self.dataclass_types:
UpperCAmelCase_ : Optional[int] = {f.name for f in dataclasses.fields(lowercase_ ) if f.init}
UpperCAmelCase_ : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCAmelCase_ : int = dtype(**lowercase_ )
outputs.append(lowercase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(lowercase_ )}""" )
return tuple(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ):
"""simple docstring"""
with open(Path(lowercase_ ) , encoding="utf-8" ) as open_json_file:
UpperCAmelCase_ : Tuple = json.loads(open_json_file.read() )
UpperCAmelCase_ : Optional[int] = self.parse_dict(lowercase_ , allow_extra_keys=lowercase_ )
return tuple(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.parse_dict(yaml.safe_load(Path(lowercase_ ).read_text() ) , allow_extra_keys=lowercase_ )
return tuple(lowercase_ )
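
# ---------------------------------------------------------------------------
# Usage sketch for HfArgumentParser (added for illustration; `ExampleArguments`
# and its fields are hypothetical, not part of the module above). It shows the
# two behaviours the parser layers on top of argparse: dataclass fields become
# CLI flags, and a boolean field that defaults to True gets an automatic
# `--no_<name>` complement flag.
if __name__ == "__main__":

    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = dataclasses.field(default=3e-4, metadata={"help": "Peak learning rate."})
        do_eval: bool = dataclasses.field(default=True, metadata={"help": "Whether to run evaluation."})

    example_parser = HfArgumentParser(ExampleArguments)
    (example_args,) = example_parser.parse_args_into_dataclasses(
        args=["--learning_rate", "1e-4", "--no_do_eval"], look_for_args_file=False
    )
    assert example_args.learning_rate == 1e-4 and example_args.do_eval is False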
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string with a Lempel-Ziv style scheme and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (Elias gamma coded) to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padding the last byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
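
# ---------------------------------------------------------------------------
# In-memory sketch of `compress_data` alone (added for illustration, not part
# of the original script): feed it a bit string directly instead of a file.
def _demo_compress_data() -> None:
    data_bits = "0" * 16 + "1" * 16  # highly repetitive input compresses well
    compressed = compress_data(data_bits)
    assert compressed and set(compressed) <= {"0", "1"}
    print(f"{len(data_bits)} bits -> {len(compressed)} bits")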
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pads received inputs and labels for CTC training."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch


class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
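
# ---------------------------------------------------------------------------
# Example invocation (illustration only; the model name, dataset config and
# hyperparameters below are placeholders, not values prescribed by the script):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-demo \
#       --overwrite_output_dir \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 16 \
#       --fp16 \
#       --do_train --do_eval
#
# Because the script parses its arguments with HfArgumentParser, the same
# settings can instead live in a single JSON file passed as the only argument:
#
#   python run_common_voice.py args.json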
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
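
# ---------------------------------------------------------------------------
# Manual smoke-run sketch (illustration only, not part of the test suite):
# the same dummy components can drive the prior pipeline outside of pytest.
def _demo_prior_pipeline() -> None:
    tests = KandinskyV22PriorPipelineFastTests("test_kandinsky_prior")
    pipe = KandinskyV22PriorPipeline(**tests.get_dummy_components())
    output = pipe(**tests.get_dummy_inputs("cpu"))
    print(output.image_embeds.shape)  # expected: (1, 32)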
"""Feature extractor class for Deformable DETR (deprecated alias of the image processor)."""
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # store token ids in the smallest integer dtype that fits the vocabulary
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
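
# ---------------------------------------------------------------------------
# Example invocation and how to read the dump back (illustration only; the
# script filename and the paths are placeholders):
#
#   python binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text
#
# The resulting pickle holds a shuffled list of numpy integer arrays, one per
# input line:
#
#   import pickle
#   with open("data/binarized_text.bert-base-uncased.pickle", "rb") as f:
#       sequences = pickle.load(f)
#   print(len(sequences), sequences[0][:10])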
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
import numpy as np
import torch

from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # [-1, 1] -> [0, 255], NCHW -> NHWC for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # back to NCHW and to [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
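
# ---------------------------------------------------------------------------
# Usage sketch (illustration only; requires the `invisible-watermark` package
# that provides `imwatermark`): the watermarker expects NCHW image batches
# scaled to [-1, 1] and silently skips images narrower than 256 pixels.
def _demo_watermark() -> None:
    watermarker = StableDiffusionXLWatermarker()
    images = torch.rand(1, 3, 256, 256) * 2 - 1  # fake batch in [-1, 1]
    watermarked = watermarker.apply_watermark(images)
    print(watermarked.shape, watermarked.min().item(), watermarked.max().item())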
import copy
import os
from typing import List, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
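
# ---------------------------------------------------------------------------
# Usage sketch (illustration only; run it with an installed transformers
# package rather than inside this module, which uses relative imports):
#
#   from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig
#
#   text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
#   vision_config = AlignVisionConfig(image_size=600)
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
#   assert config.to_dict()["text_config"]["hidden_size"] == 768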
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple,
    destination: tuple,
    allow_diagonal: bool,
):
    """Shortest path on a binary grid (1 = walkable, 0 = blocked) with unit step cost."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
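
# ---------------------------------------------------------------------------
# Usage sketch (illustration only): cells with value 1 are walkable, each step
# costs 1, and the function returns (distance, path), or (inf, []) when the
# destination is unreachable.
def _demo_dijkstra() -> None:
    grid = np.array(
        [
            [1, 1, 1],
            [0, 0, 1],
            [1, 1, 1],
        ]
    )
    dist, path = dijkstra(grid, source=(0, 0), destination=(2, 0), allow_diagonal=False)
    print(dist, path)  # 6.0 and the path around the right-hand column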
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase ="pt"
elif is_tf_available():
__UpperCAmelCase ="tf"
else:
__UpperCAmelCase ="jax"
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =PerceiverTokenizer
lowerCamelCase : Tuple =False
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def SCREAMING_SNAKE_CASE__ ( self : Dict , **a : str ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : str , a : Any=False , a : Any=20 , a : Union[str, Any]=5 ):
"""simple docstring"""
__lowerCamelCase = []
for i in range(len(a ) ):
try:
__lowerCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=a )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__lowerCamelCase = list(filter(lambda a : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , a ) )
__lowerCamelCase = list(filter(lambda a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a ) , a ) )
if max_length is not None and len(a ) > max_length:
__lowerCamelCase = toks[:max_length]
if min_length is not None and len(a ) < min_length and len(a ) > 0:
while len(a ) < min_length:
__lowerCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCamelCase = [t[0] for t in toks]
# Ensure consistency
__lowerCamelCase = tokenizer.decode(a , clean_up_tokenization_spaces=a )
if " " not in output_txt and len(a ) > 1:
__lowerCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a )
)
if with_prefix_space:
__lowerCamelCase = ''' ''' + output_txt
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
return output_txt, output_ids
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.perceiver_tokenizer
__lowerCamelCase = '''Unicode €.'''
__lowerCamelCase = tokenizer(a )
__lowerCamelCase = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded['''input_ids'''] , a )
# decoding
__lowerCamelCase = tokenizer.decode(a )
self.assertEqual(a , '''[CLS]Unicode €.[SEP]''' )
__lowerCamelCase = tokenizer('''e è é ê ë''' )
__lowerCamelCase = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded['''input_ids'''] , a )
# decoding
__lowerCamelCase = tokenizer.decode(a )
self.assertEqual(a , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
    def test_prepare_batch(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check to make sure the default value is not already 42
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 67
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 68
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble `val` up from `index` until its parent is no larger.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 303
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    # Hidden-states output of the last layer, shape (batch_size, num_channels, sample_size).
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
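
# Shape sketch (assumed layout): UNet1DModel.forward maps `sample` of shape
# (batch_size, in_channels, length) to a denoised sample of shape
# (batch_size, out_channels, length), conditioned on `timestep`.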
| 69
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors, steps):
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors):
    # Replace each segment by four segments forming the Koch "bump".
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector, angle_in_degrees):
    # Standard 2D rotation matrix applied to the input vector.
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
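
# Sanity check (illustrative): rotating the unit x-vector by 90 degrees yields the
# unit y-vector, e.g. rotate(numpy.array([1, 0]), 90) ≈ numpy.array([0, 1]).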
def plot(vectors):
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 303
| 0
|
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return a torch activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
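
# Usage sketch (illustrative): get_activation("gelu") returns an `nn.GELU()` module,
# while an unsupported name such as get_activation("tanh") raises ValueError.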
| 70
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    # Round the requested size up to the nearest multiple of scale_factor**2,
    # expressed in latent-grid units (i.e. divided by scale_factor).
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
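
# Worked example (illustrative): with the default scale_factor=8, a requested
# 768x768 image maps to ceil(768 / 64) * 8 = 96, i.e. a 96x96 latent grid.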
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 303
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
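
# With this lazy-module pattern, `from transformers.models.xglm import XGLMModel`
# only triggers the heavy framework imports on first attribute access.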
| 71
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
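
# Example (illustrative): extract_path_from_uri("s3://bucket/dir/dataset") returns
# "bucket/dir/dataset", while a local path like "/tmp/dataset" is returned unchanged.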
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    # Clear the loop and thread references so fsspec can be used in a new thread or process.
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 303
| 0
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
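
# Example expansion (illustrative): hf_hub_url(repo_id="org-name/dataset-name",
# path="filename with blanks.csv", revision="v2") yields
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv".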
| 72
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303
| 0
|
from manim import *
class A_ ( SCREAMING_SNAKE_CASE ):
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : List[str] = Rectangle(height=0.5 ,width=0.5)
__lowerCamelCase : Optional[int] = Rectangle(height=0.46 ,width=0.46).set_stroke(width=0)
__lowerCamelCase : Dict = [mem.copy() for i in range(6)]
__lowerCamelCase : List[Any] = [mem.copy() for i in range(6)]
__lowerCamelCase : Dict = VGroup(*SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0)
__lowerCamelCase : Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0)
__lowerCamelCase : Any = VGroup(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0)
__lowerCamelCase : Union[str, Any] = Text('CPU' ,font_size=2_4)
__lowerCamelCase : Union[str, Any] = Group(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0.5 ,aligned_edge=SCREAMING_SNAKE_CASE__)
cpu.move_to([-2.5, -0.5, 0])
self.add(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = [mem.copy() for i in range(1)]
__lowerCamelCase : Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0)
__lowerCamelCase : int = Text('GPU' ,font_size=2_4)
__lowerCamelCase : Union[str, Any] = Group(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0.5 ,aligned_edge=SCREAMING_SNAKE_CASE__)
gpu.align_to(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
gpu.set_x(gpu.get_x() - 1)
self.add(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = [mem.copy() for i in range(6)]
__lowerCamelCase : List[str] = VGroup(*SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0)
__lowerCamelCase : int = Text('Model' ,font_size=2_4)
__lowerCamelCase : Dict = Group(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).arrange(SCREAMING_SNAKE_CASE__ ,buff=0.5 ,aligned_edge=SCREAMING_SNAKE_CASE__)
model.move_to([3, -1.0, 0])
self.play(
Create(SCREAMING_SNAKE_CASE__ ,run_time=1) ,Create(SCREAMING_SNAKE_CASE__ ,run_time=1) ,Create(SCREAMING_SNAKE_CASE__ ,run_time=1) ,)
__lowerCamelCase : List[Any] = MarkupText(
F"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." ,font_size=2_4 ,)
__lowerCamelCase : Tuple = Square(side_length=2.2)
key.move_to([-5, 2, 0])
__lowerCamelCase : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(SCREAMING_SNAKE_CASE__ ,run_time=2.5) ,Write(SCREAMING_SNAKE_CASE__) ,Write(SCREAMING_SNAKE_CASE__))
self.add(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[str] = []
__lowerCamelCase : Tuple = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Optional[int] = Rectangle(height=0.46 ,width=0.46).set_stroke(width=0.0).set_fill(SCREAMING_SNAKE_CASE__ ,opacity=0.7)
cpu_target.move_to(SCREAMING_SNAKE_CASE__)
cpu_target.generate_target()
__lowerCamelCase : Optional[Any] = 0.46 / 4
__lowerCamelCase : Tuple = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) ,buff=0.02 ,direction=SCREAMING_SNAKE_CASE__)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=SCREAMING_SNAKE_CASE__ ,buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=SCREAMING_SNAKE_CASE__ ,buff=0.0)
cpu_targs.append(SCREAMING_SNAKE_CASE__)
first_animations.append(rect.animate(run_time=0.5).set_stroke(SCREAMING_SNAKE_CASE__))
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE__ ,run_time=1.5))
self.play(*SCREAMING_SNAKE_CASE__)
self.play(*SCREAMING_SNAKE_CASE__)
self.wait()
| 73
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
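
# Usage sketch (illustrative): MraConfig() reproduces the uw-madison/mra-base-512-4
# defaults above; individual fields can be overridden, e.g. MraConfig(num_hidden_layers=6).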
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 74
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| 303
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    # Images generated by the renderer.
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ) and isinstance(image[0], torch.Tensor ):
lowerCamelCase_ =torch.cat(lowerCAmelCase, axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCAmelCase, axis=0 )
if not isinstance(lowerCAmelCase, torch.Tensor ):
lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ =image.to(dtype=self.image_encoder.dtype, device=lowerCAmelCase )
lowerCamelCase_ =self.image_encoder(lowerCAmelCase )['''last_hidden_state''']
lowerCamelCase_ =image_embeds[:, 1:, :].contiguous() # batch_size, 256, dim (class token dropped)
lowerCamelCase_ =image_embeds.repeat_interleave(lowerCAmelCase, dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
# For classifier-free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ =torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = 1, lowerCAmelCase = 25, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 4.0, lowerCAmelCase = 64, lowerCAmelCase = "pil", lowerCAmelCase = True, ):
"""simple docstring"""
if isinstance(lowerCAmelCase, PIL.Image.Image ):
lowerCamelCase_ =1
elif isinstance(lowerCAmelCase, torch.Tensor ):
lowerCamelCase_ =image.shape[0]
elif isinstance(lowerCAmelCase, lowerCAmelCase ) and isinstance(image[0], (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ =len(lowerCAmelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase )}''' )
lowerCamelCase_ =self._execution_device
lowerCamelCase_ =batch_size * num_images_per_prompt
lowerCamelCase_ =guidance_scale > 1.0
lowerCamelCase_ =self._encode_image(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# prior
self.scheduler.set_timesteps(lowerCAmelCase, device=lowerCAmelCase )
lowerCamelCase_ =self.scheduler.timesteps
lowerCamelCase_ =self.prior.config.num_embeddings
lowerCamelCase_ =self.prior.config.embedding_dim
lowerCamelCase_ =self.prepare_latents(
(batch_size, num_embeddings * embedding_dim), image_embeds.dtype, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, self.scheduler, )
# YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ =latents.reshape(latents.shape[0], lowerCAmelCase, lowerCAmelCase )
for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ =self.scheduler.scale_model_input(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.prior(
lowerCAmelCase, timestep=lowerCAmelCase, proj_embedding=lowerCAmelCase, ).predicted_image_embedding
# remove the variance
lowerCamelCase_, lowerCamelCase_ =noise_pred.split(
scaled_model_input.shape[2], dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowerCamelCase_, lowerCamelCase_ =noise_pred.chunk(2 )
lowerCamelCase_ =noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ =self.scheduler.step(
lowerCAmelCase, timestep=lowerCAmelCase, sample=lowerCAmelCase, ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase )
lowerCamelCase_ =[]
for i, latent in enumerate(lowerCAmelCase ):
lowerCamelCase_ =self.renderer.decode(
latent[None, :], lowerCAmelCase, size=lowerCAmelCase, ray_batch_size=4_096, n_coarse_samples=64, n_fine_samples=128, )
images.append(lowerCAmelCase )
lowerCamelCase_ =torch.stack(lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
lowerCamelCase_ =images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ =[self.numpy_to_pil(lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self, '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase )
| 75
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = OmegaConf.load(snake_case )
if display:
print(yaml.dump(OmegaConf.to_container(snake_case ) ) )
return config
def a__ ( snake_case , snake_case=None , snake_case=None ):
"""simple docstring"""
if conf_path is None:
__SCREAMING_SNAKE_CASE : Any = '''./model_checkpoints/vqgan_only.yaml'''
__SCREAMING_SNAKE_CASE : List[str] = load_config(snake_case , display=snake_case )
__SCREAMING_SNAKE_CASE : str = VQModel(**config.model.params )
if ckpt_path is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''./model_checkpoints/vqgan_only.pt'''
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(snake_case , map_location=snake_case )
if ".ckpt" in ckpt_path:
__SCREAMING_SNAKE_CASE : Optional[Any] = sd['''state_dict''']
model.load_state_dict(snake_case , strict=snake_case )
model.to(snake_case )
del sd
return model
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.encode(snake_case )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
__SCREAMING_SNAKE_CASE : Any = model.decode(snake_case )
return xrec
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = string.rsplit('''.''' , 1 )
if reload:
__SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(snake_case )
importlib.reload(snake_case )
return getattr(importlib.import_module(snake_case , package=snake_case ) , cls )
def a__ ( snake_case ):
"""simple docstring"""
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def a__ ( snake_case , snake_case , snake_case=True , snake_case=True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = instantiate_from_config(snake_case )
if sd is not None:
model.load_state_dict(snake_case )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
# load the specified checkpoint
if ckpt:
__SCREAMING_SNAKE_CASE : Dict = torch.load(snake_case , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : List[Any] = pl_sd['''global_step''']
print(F'''loaded model from global step {global_step}.''' )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''state_dict''': None}
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=snake_case , eval_mode=snake_case )['''model''']
return model, global_step
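These helpers are meant to be chained: load a config, build and restore the VQGAN, then round-trip a tensor through it. A minimal glue sketch (an assumption, not from the source: `load_vqgan` and `reconstruct_with_vqgan` are assumed names for the two anonymized helpers above, and the device-first argument order is inferred from their bodies):

```py
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Paths are the defaults hard-coded in the loader above.
model = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
                   ckpt_path="./model_checkpoints/vqgan_only.pt")
x = torch.randn(1, 3, 256, 256, device=device)  # dummy RGB batch
x_rec = reconstruct_with_vqgan(x, model)  # encode -> decode round trip
```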
| 303
| 0
|
def lowerCamelCase__ ( _a = 4000000):
SCREAMING_SNAKE_CASE : Any = [0, 1]
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE : List[str] = 0
for j in range(len(_a) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''')
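The function above sums the even Fibonacci terms not exceeding the limit (Project Euler 2). A cleaned-up restatement with readable names (a sketch; the function name is an assumption):

```py
def even_fib_sum(limit: int = 4_000_000) -> int:
    # Walk the sequence and accumulate the even terms <= limit.
    total, a, b = 0, 1, 2
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert even_fib_sum(10) == 10  # 2 + 8
print(even_fib_sum())  # 4613732
```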
| 76
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''luke'''
def __init__( self : Any , _A : int=5_0267 , _A : str=50_0000 , _A : Dict=768 , _A : int=256 , _A : Tuple=12 , _A : Optional[Any]=12 , _A : Any=3072 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : Any=512 , _A : Tuple=2 , _A : int=0.02 , _A : Any=1e-12 , _A : Dict=True , _A : Optional[Any]=None , _A : List[str]=1 , _A : List[str]=0 , _A : Dict=2 , **_A : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Any = entity_vocab_size
__SCREAMING_SNAKE_CASE : int = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = entity_emb_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE : Dict = hidden_act
__SCREAMING_SNAKE_CASE : Dict = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
__SCREAMING_SNAKE_CASE : int = use_entity_aware_attention
__SCREAMING_SNAKE_CASE : Any = classifier_dropout
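A brief usage sketch for this configuration, assuming the standard `transformers` API (the instantiation below is an illustration, not part of the source):

```py
from transformers import LukeConfig, LukeModel

# Defaults mirror the attributes stored above: entity-aware attention on,
# 768-d hidden states, 256-d entity embeddings.
config = LukeConfig(entity_emb_size=256, use_entity_aware_attention=True)
model = LukeModel(config)  # randomly initialized
print(config.hidden_size, config.entity_vocab_size)
```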
| 303
| 0
|
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = 8.3_1_4_4_5_9_8
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
'''simple docstring'''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_UpperCamelCase : List[Any] = 3_00 # temperature in K
_UpperCamelCase : Tuple = 0.028 # molar mass of N2 in kg/mol, as the validation above requires
_UpperCamelCase : Any = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
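Note that the formula expects the molar mass in kg/mol, so nitrogen (28 g/mol) is passed as 0.028. A quick closed-form check of v_rms = sqrt(3RT/M) (a sketch, not in the original):

```py
from math import sqrt

R = 8.3144598  # J / (mol * K)
# Nitrogen at 300 K with M = 0.028 kg/mol
print(f"{sqrt(3 * R * 300 / 0.028):.0f} m/s")  # ~517 m/s
```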
| 77
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
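A short usage sketch of the lazily exported classes (the checkpoint id is an example, not from the source):

```py
from transformers import ElectraModel, ElectraTokenizerFast

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
model = ElectraModel.from_pretrained("google/electra-small-discriminator")
inputs = tokenizer("lazy imports resolve on first attribute access", return_tensors="pt")
print(model(**inputs).last_hidden_state.shape)
```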
| 303
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """openai-gpt"""
__UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :Optional[int] , lowercase_ :Optional[int]=4_04_78 , lowercase_ :List[Any]=5_12 , lowercase_ :List[str]=7_68 , lowercase_ :int=12 , lowercase_ :Dict=12 , lowercase_ :Union[str, Any]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :List[str]=0.1 , lowercase_ :Optional[Any]=1E-5 , lowercase_ :Optional[int]=0.02 , lowercase_ :Optional[Any]="cls_index" , lowercase_ :List[str]=True , lowercase_ :List[str]=None , lowercase_ :str=True , lowercase_ :int=0.1 , **lowercase_ :List[Any] , ) -> str:
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = afn
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = summary_proj_to_labels
super().__init__(**lowercase_ )
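A usage sketch (an illustration, assuming the standard `transformers` API); note how the attribute map above lets the generic names resolve to the GPT-style ones:

```py
from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_positions=512, n_embd=768, n_layer=12, n_head=12)
model = OpenAIGPTModel(config)  # randomly initialized
print(config.hidden_size, config.max_position_embeddings)  # 768 512
```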
| 78
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , _A : TransformeraDModel , _A : AutoencoderKL , _A : KarrasDiffusionSchedulers , _A : Optional[Dict[int, str]] = None , ):
"""simple docstring"""
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A )
# create an ImageNet label -> id dictionary for easier use
__SCREAMING_SNAKE_CASE : Optional[int] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = int(_A )
__SCREAMING_SNAKE_CASE : List[str] = dict(sorted(self.labels.items() ) )
def UpperCAmelCase__ ( self : List[Any] , _A : Union[str, List[str]] ):
"""simple docstring"""
if not isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(_A )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Dict , _A : List[int] , _A : float = 4.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = len(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.sample_size
__SCREAMING_SNAKE_CASE : List[Any] = self.transformer.config.in_channels
__SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A , device=self.device ).reshape(-1 )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([1000] * batch_size , device=self.device )
__SCREAMING_SNAKE_CASE : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input[: len(_A ) // 2]
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half, half] , dim=0 )
__SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(_A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = t
if not torch.is_tensor(_A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__SCREAMING_SNAKE_CASE : Any = latent_model_input.device.type == '''mps'''
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = torch.floataa if is_mps else torch.floataa
else:
__SCREAMING_SNAKE_CASE : int = torch.intaa if is_mps else torch.intaa
__SCREAMING_SNAKE_CASE : int = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE : Optional[int] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer(
_A , timestep=_A , class_labels=_A ).sample
# perform guidance
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = torch.split(_A , len(_A ) // 2 , dim=0 )
__SCREAMING_SNAKE_CASE : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__SCREAMING_SNAKE_CASE : List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = torch.split(_A , _A , dim=1 )
else:
__SCREAMING_SNAKE_CASE : List[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__SCREAMING_SNAKE_CASE : str = self.scheduler.step(_A , _A , _A ).prev_sample
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.chunk(2 , dim=0 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input
__SCREAMING_SNAKE_CASE : List[Any] = 1 / self.vae.config.scaling_factor * latents
__SCREAMING_SNAKE_CASE : List[str] = self.vae.decode(_A ).sample
__SCREAMING_SNAKE_CASE : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(_A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A )
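This is the class-conditional DiT pipeline; a hypothetical end-to-end call, following the published `facebook/DiT-XL-2-256` example (checkpoint id and labels are examples, not from this snippet):

```py
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Map human-readable ImageNet labels to class ids via the dictionary built above.
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
images[0].save("white_shark.png")
```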
| 303
| 0
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __lowercase ( __lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ) -> List[str]:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
_A , _A = generate_datasets(
__lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_A = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
_A = load_gpta("gpt2" ).to(__lowercase )
print("computing perplexity on objective set" )
_A = compute_perplexity(__lowercase , __lowercase , __lowercase ).item()
print("perplexity on objective set:" , __lowercase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __lowercase ( __lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ) -> Union[str, Any]:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
_A = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
_A = SecondaryLearner(__lowercase )
# Train secondary learner
_A = train_secondary_learner(
__lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ) -> Optional[Any]:
'''simple docstring'''
_A = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
_A = RandomSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase )
_A = max_steps // (len(__lowercase )) + 1
_A = 0
_A = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase )
_A , _A , _A = recopy_model(__lowercase , __lowercase , __lowercase )
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowercase )
secondary_learner.eval()
_A = []
_A = 0
_A = []
_A = []
# Compute the performance of the transformer model at the beginning
_A = compute_perplexity(__lowercase , __lowercase , __lowercase )
test_perps.append(__lowercase )
print("Test perplexity, step" , __lowercase , ":" , __lowercase )
for epoch in range(int(__lowercase ) ):
for step, example in enumerate(__lowercase ):
torch.cuda.empty_cache()
_A = random.randint(0 , example.size(2 ) - context_len - 1 )
_A = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_A = model(__lowercase , labels=__lowercase )
_A = True
if secondary_learner is not None:
_A = secondary_learner.forward(
torch.tensor(__lowercase , dtype=torch.long , device=__lowercase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__lowercase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_A = -1
if predicted_q < threshold:
_A = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_A = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_A = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_A = compute_perplexity(__lowercase , __lowercase , __lowercase )
test_perps.append(__lowercase )
print("Test perplexity, step" , __lowercase , ":" , __lowercase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __lowercase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __lowercase ( ) -> Optional[int]:
'''simple docstring'''
_A = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__lowercase , default=__lowercase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__lowercase , default=__lowercase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__lowercase , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__lowercase , default=__lowercase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__lowercase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__lowercase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__lowercase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__lowercase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__lowercase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__lowercase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__lowercase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__lowercase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__lowercase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__lowercase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__lowercase , type=__lowercase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__lowercase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__lowercase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__lowercase , type=__lowercase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
_A = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
_A = training_secondary_learner(
__lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
_A = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_A , _A = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__lowercase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 79
|
import os
import sys
lowercase_ = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowercase_ = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoConfig.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoTokenizer.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModel.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModel.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*snake_case , **snake_case )
| 303
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Dict = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80
|
from __future__ import annotations
import numpy as np
def a__ ( snake_case ):
"""simple docstring"""
return np.maximum(0 , snake_case )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
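A companion sketch (an assumption, not in the original): the subgradient of ReLU, which is what backpropagation multiplies by.

```py
import numpy as np

def relu_derivative(vector) -> np.ndarray:
    # 1 where the input was positive, 0 elsewhere (0 chosen at the kink).
    return (np.asarray(vector) > 0).astype(float)

print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]
```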
| 303
| 0
|
"""simple docstring"""
def _A ( lowercase = 1_00_00_00 ):
"""simple docstring"""
a =set(range(3 , lowercase , 2 ) )
primes.add(2 )
for p in range(3 , lowercase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowercase , lowercase ) ) )
a =[float(lowercase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowercase , limit + 1 , lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'{solution() = }')
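The sieve above accumulates Euler's totient over 2..limit (Project Euler 72). A brute-force cross-check for tiny limits (a sketch, not in the original):

```py
from math import gcd

def totient_sum_naive(limit: int) -> int:
    # phi(n) counted directly as the k in 1..n-1 coprime to n.
    return sum(
        sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1)
    )

print(totient_sum_naive(8))  # 21 = 1 + 2 + 2 + 4 + 2 + 6 + 4
```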
| 81
|
def a__ ( snake_case = 1_000_000 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
__SCREAMING_SNAKE_CASE : Optional[int] = {1: 1}
for inputa in range(2 , snake_case ):
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__SCREAMING_SNAKE_CASE : List[Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
__SCREAMING_SNAKE_CASE : str = counter
if counter > pre_counter:
__SCREAMING_SNAKE_CASE : Optional[int] = inputa
__SCREAMING_SNAKE_CASE : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
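The loop above caches Collatz chain lengths in a dictionary (Project Euler 14). An equivalent memoized restatement (a sketch; the function name is an assumption):

```py
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_length(n: int) -> int:
    # Chain length counting n itself.
    if n == 1:
        return 1
    return 1 + collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)

print(collatz_length(13))  # 10: 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
```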
| 303
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def _UpperCAmelCase ( snake_case = 1_00_00_00 , snake_case = 10 ):
"""simple docstring"""
_lowerCAmelCase = defaultdict(snake_case )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_lowerCAmelCase = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_lowerCAmelCase = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(snake_case , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"{solution() = }")
| 82
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Dict = False
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sanity_checks(snake_case )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Any = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 303
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
snake_case_ : int = 1.0_54_57_18_17e-34 # unit of ℏ : J * s
snake_case_ : Optional[int] = 3e8 # unit of c : m * s^-1
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_UpperCamelCase : Tuple = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_4_0 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_UpperCamelCase : str = (2_4_0 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_UpperCamelCase : Tuple = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
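A worked call of the ideal-plate formula F = (hbar * c * pi^2 * A) / (240 * d^4) with readable names (a self-contained restatement; the values are examples):

```py
from math import pi

HBAR = 1.054571817e-34  # J * s
C = 3e8  # m / s

def casimir_force(area: float, distance: float) -> float:
    # Attractive force between two ideal parallel plates of area `area` (m^2)
    # separated by `distance` (m).
    return (HBAR * C * pi**2 * area) / (240 * distance**4)

print(casimir_force(area=0.05, distance=5e-4))  # ~1.04e-15 N
```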
| 83
|
import math
import os
import sys
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''
try:
with open(snake_case , '''rb''' ) as binary_file:
__SCREAMING_SNAKE_CASE : int = binary_file.read()
for dat in data:
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
lexicon.pop(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = last_match_id
if math.loga(snake_case ).is_integer():
for curr_key in lexicon:
__SCREAMING_SNAKE_CASE : int = '''0''' + lexicon[curr_key]
__SCREAMING_SNAKE_CASE : List[str] = bin(snake_case )[2:]
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = {'''0''': '''0''', '''1''': '''1'''}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = '''''', ''''''
__SCREAMING_SNAKE_CASE : Optional[Any] = len(snake_case )
for i in range(len(snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__SCREAMING_SNAKE_CASE : Any = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case , snake_case , snake_case , snake_case )
index += 1
__SCREAMING_SNAKE_CASE : Tuple = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__SCREAMING_SNAKE_CASE : Dict = lexicon[curr_string]
result += last_match_id
return result
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.getsize(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = bin(snake_case )[2:]
__SCREAMING_SNAKE_CASE : int = len(snake_case )
return "0" * (length_length - 1) + file_length_binary + compressed
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 8
try:
with open(snake_case , '''wb''' ) as opened_file:
__SCREAMING_SNAKE_CASE : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case ) , snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = read_file_binary(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = compress_data(snake_case )
__SCREAMING_SNAKE_CASE : Dict = add_file_length(snake_case , snake_case )
write_file_binary(snake_case , snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
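A hypothetical, stricter entry point (not in the original): validate the argument count before delegating to `compress(source, destination)` as the `__main__` guard above does.

```py
import sys

if len(sys.argv) != 3:
    print("usage: python lempel_ziv.py <source_file> <destination_file>")
    sys.exit(1)
compress(sys.argv[1], sys.argv[2])
```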
| 303
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A , __A , __A = None , ) -> Tuple:
super().__init__()
self.register_modules(transformer=__A , vae=__A , scheduler=__A )
# create an ImageNet label -> id dictionary for easier use
lowerCAmelCase_ :str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
lowerCAmelCase_ :Union[str, Any] = int(__A )
lowerCAmelCase_ :str = dict(sorted(self.labels.items() ) )
def __lowerCAmelCase ( self , __A ) -> List[int]:
if not isinstance(__A , __A ):
lowerCAmelCase_ :int = list(__A )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , __A , __A = 4.0 , __A = None , __A = 50 , __A = "pil" , __A = True , ) -> Union[ImagePipelineOutput, Tuple]:
lowerCAmelCase_ :Tuple = len(__A )
lowerCAmelCase_ :Optional[int] = self.transformer.config.sample_size
lowerCAmelCase_ :Dict = self.transformer.config.in_channels
lowerCAmelCase_ :str = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__A , device=self.device , dtype=self.transformer.dtype , )
lowerCAmelCase_ :str = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowerCAmelCase_ :Tuple = torch.tensor(__A , device=self.device ).reshape(-1 )
lowerCAmelCase_ :Dict = torch.tensor([1000] * batch_size , device=self.device )
lowerCAmelCase_ :Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowerCAmelCase_ :Tuple = latent_model_input[: len(__A ) // 2]
lowerCAmelCase_ :Optional[Any] = torch.cat([half, half] , dim=0 )
lowerCAmelCase_ :Tuple = self.scheduler.scale_model_input(__A , __A )
lowerCAmelCase_ :List[str] = t
if not torch.is_tensor(__A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowerCAmelCase_ :Dict = latent_model_input.device.type == """mps"""
if isinstance(__A , __A ):
lowerCAmelCase_ :Tuple = torch.floataa if is_mps else torch.floataa
else:
lowerCAmelCase_ :Union[str, Any] = torch.intaa if is_mps else torch.intaa
lowerCAmelCase_ :str = torch.tensor([timesteps] , dtype=__A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowerCAmelCase_ :Optional[int] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase_ :str = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowerCAmelCase_ :Optional[Any] = self.transformer(
__A , timestep=__A , class_labels=__A ).sample
# perform guidance
if guidance_scale > 1:
lowerCAmelCase_ , lowerCAmelCase_ :Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowerCAmelCase_ , lowerCAmelCase_ :str = torch.split(__A , len(__A ) // 2 , dim=0 )
lowerCAmelCase_ :Optional[int] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowerCAmelCase_ :Tuple = torch.cat([half_eps, half_eps] , dim=0 )
lowerCAmelCase_ :int = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowerCAmelCase_ , lowerCAmelCase_ :int = torch.split(__A , __A , dim=1 )
else:
lowerCAmelCase_ :List[str] = noise_pred
# compute previous image: x_t -> x_t-1
lowerCAmelCase_ :Optional[Any] = self.scheduler.step(__A , __A , __A ).prev_sample
if guidance_scale > 1:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = latent_model_input.chunk(2 , dim=0 )
else:
lowerCAmelCase_ :List[str] = latent_model_input
lowerCAmelCase_ :Tuple = 1 / self.vae.config.scaling_factor * latents
lowerCAmelCase_ :Optional[Any] = self.vae.decode(__A ).sample
lowerCAmelCase_ :Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ :List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ :Optional[int] = self.numpy_to_pil(__A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__A )
| 84
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 303
| 0
|
"""Implementation of the Atbash cipher (https://en.wikipedia.org/wiki/Atbash)."""
import string


def atbash_slow(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher, one character at a time."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher using a translation table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'> atbash_slow(): {timeit("atbash_slow(printable)", setup=setup)} seconds')
    print(f'> atbash(): {timeit("atbash(printable)", setup=setup)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"{example} encrypted in atbash: {atbash(example)}")
benchmark()
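
# Quick sanity check (sketch): Atbash is an involution, so applying it twice
# restores the input, and both implementations must agree.
if __name__ == "__main__":
    assert atbash(atbash("The quick brown fox.")) == "The quick brown fox."
    assert atbash_slow("ABCxyz") == atbash("ABCxyz") == "ZYXcba"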
| 85
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
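
# Typical invocation (a sketch; the script filename is assumed, the flags match
# the argparse definitions above):
#
#   python binarize_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text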
| 303
| 0
|
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to octal, e.g. 65 -> '0o101'."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print('\n2 in octal is:')
    print(decimal_to_octal(2))  # = 2
    print('\n8 in octal is:')
    print(decimal_to_octal(8))  # = 10
    print('\n65 in octal is:')
    print(decimal_to_octal(65))  # = 101
    print('\n216 in octal is:')
    print(decimal_to_octal(216))  # = 330
    print('\n512 in octal is:')
    print(decimal_to_octal(512))  # = 1000
    print('\n')
if __name__ == "__main__":
main()
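
# Cross-check (sketch): Python's built-in oct() uses the same "0o" prefix, so the
# two should agree for any non-negative input.
if __name__ == "__main__":
    assert decimal_to_octal(65) == oct(65) == "0o101"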
| 86
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
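
# Illustrative usage sketch (shapes and values are examples): the encoder expects
# a batch of images in [-1, 1]; anything narrower than 256 px passes through unchanged.
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.rand(2, 3, 512, 512) * 2 - 1
#   watermarked = watermarker.apply_watermark(images)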
| 303
| 0
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
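
# Illustrative usage sketch (token ids are arbitrary; transformerDimSize must
# match the text model's hidden size, e.g. 768 for an XLM-R base config):
#
#   model = MultilingualCLIP(MCLIPConfig(transformerDimSize=768))
#   input_ids = torch.tensor([[0, 581, 10, 2]])
#   attention_mask = torch.ones_like(input_ids)
#   text_embeds, hidden_states = model(input_ids, attention_mask)
#   # text_embeds: (1, 768) mean-pooled projection; hidden_states: (1, 4, 768)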
| 87
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Return the shortest distance and path between source and destination on a
    binary grid, where cells equal to 1 are traversable and each step costs 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
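
# Illustrative example (grid values are arbitrary): 1-cells are traversable,
# 0-cells are blocked.
if __name__ == "__main__":
    example_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    print(dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False))
    # (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])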
| 303
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="http://www.cs.umd.edu/~snover/tercom/", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                } ), codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"], reference_urls=[
                "https://github.com/jhclark/tercom",
            ], )

    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 88
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 303
| 0
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512))
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type='np', num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy')
        assert np.abs(expected_image - image).max() < 9e-2
| 89
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"""{split}.{field}""" ), 'w' ) as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
                --data_dir {data_dir} \
                --output_dir {output_dir} \
                --model_name_or_path facebook/rag-sequence-base \
                --model_type rag_sequence \
                --do_train \
                --do_predict \
                --n_val -1 \
                --val_check_interval 1.0 \
                --train_batch_size 2 \
                --eval_batch_size 1 \
                --max_source_length 25 \
                --max_target_length 25 \
                --val_max_target_length 25 \
                --test_max_target_length 25 \
                --label_smoothing 0.1 \
                --dropout 0.1 \
                --attention_dropout 0.1 \
                --weight_decay 0.001 \
                --adam_epsilon 1e-08 \
                --max_grad_norm 0.1 \
                --lr_scheduler polynomial \
                --learning_rate 3e-04 \
                --num_train_epochs 1 \
                --warmup_steps 4 \
                --gradient_accumulation_steps 1 \
                --distributed-port 8787 \
                --use_dummy_dataset 1 \
                --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"""--gpus={gpus}""" )
            if is_apex_available():
                testargs.append('--fp16')
        else:
            testargs.append('--gpus=0')
            testargs.append('--distributed_backend=ddp_cpu')
            testargs.append('--num_processes=2')

        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 90
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
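
# Illustrative example (weights are arbitrary): for the weighted triangle graph
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), the minimum spanning tree is (0, 1), (1, 2):
#
#   prisms_algorithm({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]})
#   # -> [(0, 1), (1, 2)]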
| 303
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase_ : List[Any] = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, '''models/bert/'''))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/transformers/models/bert/modeling_bert.py''') , os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py''') , )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = '''src/transformers'''
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, '''new_code.py''')
        with open(fname, '''w''', newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE) , )
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub('''Bert''' , long_class_name , REFERENCE_CODE) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , REFERENCE_CODE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE) , )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['''format_model_list'''])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['''format_model_list'''])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_changed = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        link_unchanged = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link, link_changed, localized_readme['''format_model_list'''])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, link_unchanged)
| 91
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `iteration_step` to the vector list `steps` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with the four segments of the Koch construction."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect('''equal''')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
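    # Sanity check (sketch): each iteration replaces every segment with four, so
    # after n steps the snowflake edge has 3 * 4**n segments, i.e. 3 * 4**n + 1 points.
    assert len(processed_vectors) == 3 * 4**5 + 1  # 3073 points after 5 iterations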
| 303
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""PoolFormerFeatureExtractor"""]
UpperCamelCase__ = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
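# Note: this file follows the transformers lazy-import pattern: `_import_structure`
# maps submodule names to their public symbols, and at runtime the module object is
# swapped for a `_LazyModule` that only imports a submodule on first attribute access.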
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Map image height/width to the latent grid, rounding up to a multiple of scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
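# For example, with the movq scale factor of 8 used below:
#     downscale_height_and_width(768, 768, 8) == (96, 96)   # 768 // 64 == 12, then 12 * 8
#     downscale_height_and_width(512, 512, 8) == (64, 64)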
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Pipeline for image generation with Kandinsky 2.2, conditioned on a ControlNet-style hint."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU, moving each one to GPU only while its forward pass runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU between calls; fewer device moves than sequential offload."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
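# Classifier-free guidance above follows eps = eps_uncond + scale * (eps_text - eps_uncond);
# the negative and positive image embeddings are concatenated along the batch dimension so
# the UNet runs only once per denoising step for both branches.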
"""The SuperGLUE benchmark metric."""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path):
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
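# For example, extract_path_from_uri("s3://my-bucket/data") returns "my-bucket/data"
# (bucket name here is hypothetical), while a plain local path comes back unchanged.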
def is_remote_filesystem(fs):
    """Return True if the given fsspec filesystem is not the local one."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src, dst):
    """Rename src to dst on the given filesystem, using a plain move when the filesystem is local."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clear fsspec's async loop/thread references so forked processes don't hang."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
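# Each *_command_parser registers a sub-command and (in accelerate) sets a `func` default
# on its subparser, which is why the dispatch above reduces to `args.func(args)`.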
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    """Configuration class to store the configuration of an MRA model."""

    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer based on SentencePiece."""

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
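# Minimal usage sketch (assumes local `sentencepiece.bpe.model` and `vocab.txt` files):
#     tokenizer = ErnieMTokenizer("sentencepiece.bpe.model", vocab_file="vocab.txt")
#     tokenizer.tokenize("Hello world")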
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with a 1D UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
"""Helpers for reading configuration values from environment variables."""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found under any of `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
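# Example (hypothetical variables): with LOCAL_RANK=2 exported,
#     get_int_from_env(["LOCAL_RANK", "MPI_LOCALRANKID"], 0) -> 2
#     parse_flag_from_env("MY_DEBUG_FLAG") -> False unless MY_DEBUG_FLAG holds a truthy string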
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config from disk, optionally printing it as YAML."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config and load weights from a checkpoint."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode an image batch to the VQGAN latent space and decode it back."""
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted "module.Class" string to the class object."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
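# Usage sketch (hypothetical checkpoint paths, CUDA device assumed available):
#     model = load_vqgan(torch.device("cuda"), conf_path="./model_checkpoints/vqgan_only.yaml")
#     xrec = reconstruct_with_vqgan(x, model)  # x: image batch in the model's expected range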
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib, coins_excess)

    return get_distrib(root)[0]
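# Worked example: a root holding 3 coins with two coinless children needs one move per
# child, so distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) returns 2.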
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LUKE model."""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a DiT transformer, a VAE and a scheduler."""

    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable ImageNet label strings to their class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 0
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowercase__ :Tuple = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,*A__ ,**A__):
super().__init__(*A__ ,**A__)
requires_backends(self ,'''decord''')
self.check_model_type(A__)
def A__ ( self ,A__=None ,A__=None ,A__=None):
lowercase = {}
if frame_sampling_rate is not None:
lowercase = frame_sampling_rate
if num_frames is not None:
lowercase = num_frames
lowercase = {}
if top_k is not None:
lowercase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,A__ ,**A__):
return super().__call__(A__ ,**A__)
def A__ ( self ,A__ ,A__=None ,A__=1):
if num_frames is None:
lowercase = self.model.config.num_frames
if video.startswith('''http://''') or video.startswith('''https://'''):
lowercase = BytesIO(requests.get(A__).content)
lowercase = VideoReader(A__)
videoreader.seek(0)
lowercase = 0
lowercase = num_frames * frame_sampling_rate - 1
lowercase = np.linspace(A__ ,A__ ,num=A__ ,dtype=np.int64)
lowercase = videoreader.get_batch(A__).asnumpy()
lowercase = list(A__)
lowercase = self.image_processor(A__ ,return_tensors=self.framework)
return model_inputs
def A__ ( self ,A__):
lowercase = self.model(**A__)
return model_outputs
def A__ ( self ,A__ ,A__=5):
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1)[0]
lowercase , lowercase = probs.topk(A__)
else:
raise ValueError(f'Unsupported framework: {self.framework}')
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A__ ,A__)]
| 101
|
import os
import sys
lowercase_ = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowercase_ = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoConfig.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoTokenizer.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModel.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModel.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*snake_case , **snake_case )
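# Hedged usage sketch (checkpoint name illustrative; the wrapper names above are obfuscated,
# so the underlying Auto classes are shown directly):
# from transformers import AutoConfig, AutoModelForMaskedLM
# cfg = AutoConfig.from_pretrained("bert-base-uncased")
# mlm = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")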
| 303
| 0
|
"""simple docstring"""
def lowercase ( _snake_case : int , _snake_case : int ) ->str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__snake_case : Tuple = str(bin(_snake_case ) )[2:] # remove the leading "0b"
__snake_case : List[Any] = str(bin(_snake_case ) )[2:]
__snake_case : Any = max(len(_snake_case ) , len(_snake_case ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_snake_case ) , b_binary.zfill(_snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
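# Worked example for the bitwise-OR helper above: 25 = 0b11001 and 32 = 0b100000, so after
# zero-padding to a common width the column-wise OR yields "0b111001", i.e. 57 == 25 | 32.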
| 102
|
from __future__ import annotations
import numpy as np
def a__ ( snake_case ):
"""simple docstring"""
return np.maximum(0 , snake_case )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 303
| 0
|
from collections.abc import Callable
import numpy as np
def UpperCamelCase( __UpperCamelCase : Callable ,__UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ):
lowerCAmelCase_ : Optional[int] = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase_ : Optional[Any] = np.zeros((n + 1,) )
lowerCAmelCase_ : Tuple = ya
lowerCAmelCase_ : Dict = xa
for k in range(__UpperCamelCase ):
lowerCAmelCase_ : str = y[k] + step_size * ode_func(__UpperCamelCase ,y[k] )
lowerCAmelCase_ : List[Any] = y[k] + (
(step_size / 2) * (ode_func(__UpperCamelCase ,y[k] ) + ode_func(x + step_size ,__UpperCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
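# One hand-worked step of the predictor-corrector (Heun / modified Euler) scheme above
# for y' = y, y(0) = 1 and step size 0.1:
#   predictor: y* = 1 + 0.1 * 1 = 1.1
#   corrector: y1 = 1 + (0.1 / 2) * (1 + 1.1) = 1.105   (exact: e**0.1 ~= 1.10517)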
| 103
|
def a__ ( snake_case = 1_000_000 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
__SCREAMING_SNAKE_CASE : Optional[int] = {1: 1}
for inputa in range(2 , snake_case ):
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__SCREAMING_SNAKE_CASE : List[Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
__SCREAMING_SNAKE_CASE : str = counter
if counter > pre_counter:
__SCREAMING_SNAKE_CASE : Optional[int] = inputa
__SCREAMING_SNAKE_CASE : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
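# Worked check for the memoised Collatz chain lengths above: the chain for 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 (10 terms), so its stored counter is 10.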
| 303
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE : Union[str, Any] = 'CLIPImageProcessor'
SCREAMING_SNAKE_CASE : Union[str, Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] ,lowercase__ : Dict=None ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Tuple ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,lowercase__ ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ ,lowercase__ )
def __call__( self : List[Any] ,lowercase__ : str=None ,lowercase__ : List[Any]=None ,lowercase__ : Optional[Any]=None ,**lowercase__ : int ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if images is not None:
__lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : List[str] ,**lowercase__ : int ):
return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase__ ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase__ ,)
return self.image_processor
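# Hedged usage sketch for the processor above (checkpoint name illustrative):
# from transformers import CLIPProcessor
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# enc = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# enc then carries input_ids/attention_mask plus pixel_values, as assembled in __call__ above.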
| 104
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Dict = False
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sanity_checks(snake_case )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Any = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
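# Hedged example invocation using only the flags defined above (paths and names illustrative):
# python train.py --force --dump_path serialization_dir/distilbert_run \
#   --data_file data/binarized_text.bert-base-uncased.pickle \
#   --token_counts data/token_counts.bert-base-uncased.pickle \
#   --student_type distilbert --student_config training_configs/distilbert.json \
#   --teacher_type bert --teacher_name bert-base-uncased \
#   --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0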
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
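# Note on the pattern above: _LazyModule defers the heavy torch/sentencepiece imports until an
# attribute is first accessed, so importing the package stays cheap at interpreter startup.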
| 105
|
import math
import os
import sys
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''
try:
with open(snake_case , '''rb''' ) as binary_file:
__SCREAMING_SNAKE_CASE : int = binary_file.read()
for dat in data:
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
lexicon.pop(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = last_match_id
if math.log2(snake_case ).is_integer():
for curr_key in lexicon:
__SCREAMING_SNAKE_CASE : int = '''0''' + lexicon[curr_key]
__SCREAMING_SNAKE_CASE : List[str] = bin(snake_case )[2:]
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = {'''0''': '''0''', '''1''': '''1'''}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = '''''', ''''''
__SCREAMING_SNAKE_CASE : Optional[Any] = len(snake_case )
for i in range(len(snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__SCREAMING_SNAKE_CASE : Any = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case , snake_case , snake_case , snake_case )
index += 1
__SCREAMING_SNAKE_CASE : Tuple = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__SCREAMING_SNAKE_CASE : Dict = lexicon[curr_string]
result += last_match_id
return result
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.getsize(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = bin(snake_case )[2:]
__SCREAMING_SNAKE_CASE : int = len(snake_case )
return "0" * (length_length - 1) + file_length_binary + compressed
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 8
try:
with open(snake_case , '''wb''' ) as opened_file:
__SCREAMING_SNAKE_CASE : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case ) , snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = read_file_binary(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = compress_data(snake_case )
__SCREAMING_SNAKE_CASE : Dict = add_file_length(snake_case , snake_case )
write_file_binary(snake_case , snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
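# Hedged usage sketch (file names illustrative): the script above is driven as
#   python compress.py input.txt output.lzw
# read_file_binary turns the source into a bit string, compress_data encodes it LZW-style
# against a growing lexicon, add_file_length prefixes the original file size so a matching
# decompressor can recover it, and write_file_binary packs the bits into padded bytes.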
| 303
| 0
|
"""simple docstring"""
__UpperCamelCase : Optional[Any] = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 106
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = KandinskyV22PriorPipeline
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = ['''prompt''', '''negative_prompt''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
__SCREAMING_SNAKE_CASE : Optional[Any] = PriorTransformer(**_A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(_A )
return model
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
__SCREAMING_SNAKE_CASE : str = self.dummy_image_encoder
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_image_processor
__SCREAMING_SNAKE_CASE : str = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
__SCREAMING_SNAKE_CASE : int = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '''cpu'''
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_A ) )
__SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__SCREAMING_SNAKE_CASE : Tuple = image[0, -10:]
__SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__SCREAMING_SNAKE_CASE : List[str] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : int = False
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )
@skip_mps
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A , test_mean_pixel_difference=_A , )
| 303
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = """marian"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] , __lowerCamelCase : Union[str, Any]=5_81_01 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=10_24 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : Any=40_96 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : int=40_96 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Any=True , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : str=10_24 , __lowerCamelCase : int=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : List[Any]=5_81_00 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Tuple=5_81_00 , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=True , **__lowerCamelCase : Tuple , ) -> int:
a = vocab_size
a = decoder_vocab_size or vocab_size
a = max_position_embeddings
a = d_model
a = encoder_ffn_dim
a = encoder_layers
a = encoder_attention_heads
a = decoder_ffn_dim
a = decoder_layers
a = decoder_attention_heads
a = dropout
a = attention_dropout
a = activation_dropout
a = activation_function
a = init_std
a = encoder_layerdrop
a = decoder_layerdrop
a = use_cache
a = encoder_layers
a = scale_embedding # scale factor will be sqrt(d_model) if True
a = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
a = {0: "batch"}
a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
a = {0: "batch", 1: "decoder_sequence"}
a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
a , a = self.num_layers
for i in range(__lowerCamelCase ):
a = {0: "batch", 2: "past_sequence + sequence"}
a = {0: "batch", 2: "past_sequence + sequence"}
else:
a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a = super().outputs
else:
a = super(__lowerCamelCase , self ).outputs
if self.use_past:
a , a = self.num_layers
for i in range(__lowerCamelCase ):
a = {0: "batch", 2: "past_sequence + sequence"}
a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
a = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
a = seq_length if not self.use_past else 1
a = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
a = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a , a = common_inputs["input_ids"].shape
a = common_inputs["decoder_input_ids"].shape[1]
a , a = self.num_attention_heads
a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a = decoder_seq_length + 3
a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a , a = self.num_layers
a = min(__lowerCamelCase , __lowerCamelCase )
a = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
a = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a , a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
a = seqlen + 2
a , a = self.num_layers
a , a = self.num_attention_heads
a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a = common_inputs["attention_mask"].dtype
a = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
a = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
a = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
a = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
a = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : List[Any] ) -> str:
if self.task in ["default", "seq2seq-lm"]:
a = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
a = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@property
def __UpperCAmelCase ( self : Tuple ) -> float:
return 1e-4
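# Shape sanity check for the dummy past_key_values built above (numbers illustrative): with
# batch=2, decoder_attention_heads=8, d_model=512 and decoder_seq_length=8, each decoder past
# tensor is (2, 8, 8 + 3, 512 // 8) == (2, 8, 11, 64), since decoder_past_length = seq_length + 3.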
| 107
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=snake_case , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=snake_case , default='''data/dump''' , help='''The dump file prefix.''' )
__SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
__SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
__SCREAMING_SNAKE_CASE : Dict = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
__SCREAMING_SNAKE_CASE : str = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
__SCREAMING_SNAKE_CASE : str = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F'''{len(snake_case )} examples to process.''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : List[str] = 10_000
__SCREAMING_SNAKE_CASE : Dict = time.time()
for text in data:
__SCREAMING_SNAKE_CASE : Optional[int] = F'''{bos} {text.strip()} {sep}'''
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case )
rslt.append(snake_case )
iter += 1
if iter % interval == 0:
__SCREAMING_SNAKE_CASE : List[str] = time.time()
logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = time.time()
logger.info('''Finished binarization''' )
logger.info(F'''{len(snake_case )} examples processed.''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
__SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size
if vocab_size < (1 << 16):
__SCREAMING_SNAKE_CASE : List[str] = [np.uint16(snake_case ) for d in rslt]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [np.int32(snake_case ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(snake_case , '''wb''' ) as handle:
pickle.dump(rslt_ , snake_case , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
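# Hedged example invocation using the arguments defined above (paths illustrative):
# python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#   --tokenizer_name bert-base-uncased --dump_file data/binarized_text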
| 303
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowercase_ = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = WATERMARK_BITS
__SCREAMING_SNAKE_CASE : Optional[int] = WatermarkEncoder()
self.encoder.set_watermark('''bits''' , self.watermark )
def UpperCAmelCase__ ( self : List[Any] , _A : torch.FloatTensor ):
"""simple docstring"""
if images.shape[-1] < 256:
return images
__SCREAMING_SNAKE_CASE : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__SCREAMING_SNAKE_CASE : Dict = [self.encoder.encode(_A , '''dwtDct''' ) for image in images]
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(np.array(_A ) ).permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
| 303
| 0
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def _snake_case ( UpperCamelCase : Union[str, Any] ):
# getting number of pixels in the image
UpperCAmelCase , UpperCAmelCase : Tuple = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase ):
for j in range(UpperCamelCase ):
UpperCAmelCase : str = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
A: Union[str, Any] = imread("image_data/lena.jpg", 1)
# convert to its negative
A: Optional[Any] = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
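# Worked check for the negative transform above: since OpenCV loads pixels as BGR triples,
# a pixel [10, 20, 30] maps to [255 - 10, 255 - 20, 255 - 30] == [245, 235, 225].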
| 109
|
from heapq import heappop, heappush
import numpy as np
def a__ ( snake_case , snake_case , snake_case , snake_case , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = grid.shape
__SCREAMING_SNAKE_CASE : Tuple = [-1, 1, 0, 0]
__SCREAMING_SNAKE_CASE : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = [(0, source)], set()
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.full((rows, cols) , np.inf )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.empty((rows, cols) , dtype=snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = None
while queue:
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = heappop(snake_case )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__SCREAMING_SNAKE_CASE : int = []
while (x, y) != source:
path.append((x, y) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = predecessors[x, y]
path.append(snake_case ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(snake_case ) ):
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__SCREAMING_SNAKE_CASE : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(snake_case , (dist + 1, (nx, ny)) )
__SCREAMING_SNAKE_CASE : int = dist + 1
__SCREAMING_SNAKE_CASE : Dict = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
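# Hedged usage sketch (the search function's name is obfuscated above; call it `dijkstra`):
# import numpy as np
# grid = np.ones((3, 3), dtype=int)                   # 1 marks a passable cell
# dist, path = dijkstra(grid, (0, 0), (2, 2), False)  # dist == 4.0 without diagonals
# dist, path = dijkstra(grid, (0, 0), (2, 2), True)   # dist == 2.0 with diagonals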
| 303
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
UpperCamelCase__: str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler("sample_euler" )
UpperCamelCase__: Union[str, Any] = '''A painting of a squirrel eating a burger'''
UpperCamelCase__: Optional[Any] = torch.manual_seed(0 )
UpperCamelCase__: Optional[int] = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
UpperCamelCase__: Optional[Any] = output.images
UpperCamelCase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__: int = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Any = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCamelCase__: Dict = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler("sample_euler" )
UpperCamelCase__: str = '''A painting of a squirrel eating a burger'''
UpperCamelCase__: Optional[int] = torch.manual_seed(0 )
UpperCamelCase__: List[str] = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
UpperCamelCase__: Any = output.images
UpperCamelCase__: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__: str = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCamelCase__: int = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
UpperCamelCase__: Any = '''A painting of a squirrel eating a burger'''
UpperCamelCase__: List[str] = torch.manual_seed(0 )
UpperCamelCase__: Optional[int] = sd_pipe(
[prompt] , generator=_A , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=_A , )
UpperCamelCase__: Tuple = output.images
UpperCamelCase__: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__: List[Any] = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 149
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 303
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : Any=False ):
'''simple docstring'''
A_ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Pop a key from the dict and re-insert its value under the new name."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Load a test image of two cats, used to verify the conversion."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
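# Illustrative invocation (added; the script filename is assumed, the checkpoint name is
# the argparse default above):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224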
| 140
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303
| 0
|
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class a__( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
])
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
])
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1E-2)
| 272
|
import sys
from collections import defaultdict
class Heap:
    """Min-heap keyed by vertex distance, tracking each vertex's position inside the heap."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's algorithm: grow a minimum spanning tree from vertex 0, returning its edges."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
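# Illustrative example (added; not in the original script). For the weighted triangle
#   0 -1- 1, 0 -3- 2, 1 -1- 2
# the adjacency list {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
# should yield the minimum spanning tree edges [(0, 1), (1, 2)] with total weight 2.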
| 303
| 0
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn from the urn."""
    # By linearity of expectation, each colour is present with probability
    # 1 - C(NUM_BALLS - BALLS_PER_COLOUR, taken) / C(NUM_BALLS, taken).
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20))
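# Worked check (added; computed from the formula above and worth re-verifying): with 70
# balls, 10 per colour, and 20 drawn, solution() should print approximately 6.818741802.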
| 240
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Run `steps` Koch-snowflake iterations starting from the initial triangle."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
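# Scale note (added; follows from the construction, not from the original file): each
# iteration replaces every segment with four, so after 5 iterations the closed triangle's
# 3 segments become 3 * 4**5 = 3072 segments, i.e. len(processed_vectors) == 3073.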
| 303
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)])
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40, tokenizer=None, ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
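# Minimal usage sketch (added for illustration; assumes a local transformers install):
#
#   config = DebertaV2Config()                 # deberta-v2-xlarge defaults (1536 hidden, 24 layers)
#   onnx_config = DebertaV2OnnxConfig(config)  # inspect onnx_config.inputs for the export axes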
| 171
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Map an image size to its latent size: ceil(dim / scale_factor**2) * scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
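# Worked example (added): downscale_height_and_width(768, 768) -> (96, 96), the latent
# resolution for a 768x768 image with the default movq scale factor of 8 (768 / 8 = 96).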
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Image generation pipeline for Kandinsky 2.2 with ControlNet-style hint conditioning."""
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU via accelerate, moving them to GPU one at a time."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving each to GPU only when its forward runs."""
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device on which the pipeline's models will actually run."""
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
            if output_type == "pil":
                image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 303
| 0
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = 'pytorch_model.bin'
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."})
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."})


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training procedure."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."})
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."})
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('probability', reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(['label', 'probability'])
    dataset = dataset.rename_column('prediction', 'label')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
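# Note (added): when `do_filter_by_val_performance` is set, the fraction of pseudo-labeled
# rows kept equals the previous iteration's validation score, so a model at 0.9 accuracy
# keeps roughly the 90% most confident predictions for the next round.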
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: pseudo-train each iteration, optionally fine-tune on labeled data."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, 'stage-1')
        arguments_dict = {
            'accelerator': accelerator,
            'model_name_or_path': args.model_name_or_path,
            'cache_dir': args.cache_dir,
            'do_train': True,
            'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
            'do_eval': True if args.eval_file is not None else False,
            'eval_file': data_files['eval'],
            'do_predict': True,
            'infer_file': data_files['infer'],
            'task_name': args.task_name,
            'label_list': args.label_list,
            'output_dir': current_output_dir,
            'eval_metric': args.eval_metric,
            'evaluation_strategy': args.evaluation_strategy,
            'early_stopping_patience': args.early_stopping_patience,
            'early_stopping_threshold': args.early_stopping_threshold,
            'seed': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.', model_bin_file_path, iteration, )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('Self-training job completed: iteration: %d, stage: 1.', iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, 'best-checkpoint')
            current_output_dir = os.path.join(current_data_dir, 'stage-2')
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.', model_bin_file_path, iteration, )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('Self-training job completed: iteration: %d, stage: 2.', iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, 'best-checkpoint'))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, 'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir, 'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, 'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'data': data_files['infer']})['data']
        infer_output = load_dataset('csv', data_files={'data': infer_output_file})['data']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files['train_pseudo'] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', best_iteration)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"), os.path.join(output_dir, 'eval_results_best-iteration.json'), )
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, 'eval_results_best-iteration.json'), )
| 225
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a remote dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """True if `fs` is a non-local fsspec filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src, dst):
    """Rename/move a path; uses shutil.move locally and fs.mv on remote filesystems."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop/thread references (needed after forking)."""
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
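# Illustrative usage (added): extract_path_from_uri("s3://bucket/dataset") returns
# "bucket/dataset", while a plain local path such as "/tmp/dataset" is returned unchanged.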
| 303
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
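# Minimal usage sketch (added; `MraModel` is assumed importable from the same package):
#
#   from transformers import MraConfig, MraModel
#   config = MraConfig()          # mra-base-512-4 style defaults
#   model = MraModel(config)      # randomly initialized model with that architecture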
| 154
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig matching the variant encoded in the checkpoint name."""
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key(name):
    """Map an original FocalNet parameter name to its HF transformers counterpart."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = 'focalnet.' + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the checkpoint's weights into the HF FocalNet structure."""
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")['model']

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''mra'''
def __init__( self : str , _A : List[str]=5_0265 , _A : int=768 , _A : Union[str, Any]=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : Any="gelu" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=512 , _A : Tuple=1 , _A : List[str]=0.02 , _A : Union[str, Any]=1e-5 , _A : Optional[int]="absolute" , _A : Union[str, Any]=4 , _A : List[Any]="full" , _A : Union[str, Any]=0 , _A : Union[str, Any]=0 , _A : Optional[Any]=1 , _A : Union[str, Any]=0 , _A : Any=2 , **_A : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__SCREAMING_SNAKE_CASE : Dict = vocab_size
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = initializer_range
__SCREAMING_SNAKE_CASE : Any = type_vocab_size
__SCREAMING_SNAKE_CASE : str = layer_norm_eps
__SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
__SCREAMING_SNAKE_CASE : str = block_per_row
__SCREAMING_SNAKE_CASE : Union[str, Any] = approx_mode
__SCREAMING_SNAKE_CASE : Optional[int] = initial_prior_first_n_blocks
__SCREAMING_SNAKE_CASE : List[Any] = initial_prior_diagonal_n_blocks
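# Usage sketch for the config above (kept commented out since config modules
# should not execute code at import time; the reduced sizes are illustrative):
# config = MraConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
# config.save_pretrained("./mra-small")  # writes config.json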
| 303
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()

results = {}
# fmt: off
lowercase : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase : List[Any] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase : int = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase : int = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase : Optional[int] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase : Any = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase : Dict = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase : int = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase : Tuple = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase : str = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase : Any = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase : Optional[int] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase : Optional[Any] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase : str = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase : str = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
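# The lookup key used above is just the model id with "/" and "-" folded into "_",
# e.g. "google/ddpm-cifar10-32" -> "google_ddpm_cifar10_32". A standalone check:
_example_id = "google/ddpm-cifar10-32"
assert "_".join("_".join(_example_id.split("/")).split("-")) == "google_ddpm_cifar10_32"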
| 42
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
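# Usage sketch (the checkpoint id is an assumption; any unconditional audio
# diffusion checkpoint with a matching unet/scheduler pair would work):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(batch_size=1, num_inference_steps=100)
# waveform = output.audios[0]  # numpy array of shape (channels, sample_count)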
| 303
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 309
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module_str, cls = string.rsplit(".", 1)
    if reload:
        module_to_reload = importlib.import_module(module_str)
        importlib.reload(module_to_reload)
    return getattr(importlib.import_module(module_str, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
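# Minimal example of the config shape `instantiate_from_config` expects: `target`
# is a dotted import path and `params` the constructor kwargs. Kept commented out
# because the params below are illustrative and incomplete (a real VQModel also
# needs `ddconfig` and `lossconfig`):
# example = {"target": "taming.models.vqgan.VQModel", "params": {"embed_dim": 256, "n_embed": 1024}}
# model = instantiate_from_config(example)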
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
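# End-to-end sketch, assuming the default checkpoint paths used above exist locally:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# vqgan = load_vqgan(device)
# batch = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a preprocessed image
# reconstruction = reconstruct_with_vqgan(batch, vqgan)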
| 303
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 63
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 303
| 0
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare everything except the names: blank them out, compare, then restore
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
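# Usage sketch: write a de-duplicated copy of an exported model next to the
# original (the input path is illustrative):
# optimized_path = remove_dup_initializers("./exports/model.onnx")
# print(optimized_path)  # ./exports/optimized_model.onnx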
| 149
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
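# With the lazy module installed in sys.modules, submodule attributes resolve on
# first access, so the heavy framework imports above are deferred. A sketch:
# import transformers.models.electra as electra
# model_cls = electra.ElectraModel  # torch-side import happens here, not at package import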
| 303
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_download_keeps_only_flax_weights(self):
        # descriptive name chosen here; unittest requires the `test_` prefix
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_A )
A_ : Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : Optional[int] = jax.random.PRNGKey(0 )
A_ : List[Any] = 4
A_ : Dict = jax.device_count()
A_ : Optional[Any] = num_samples * [prompt]
A_ : Tuple = pipeline.prepare_inputs(_A )
# shard inputs and rng
A_ : Tuple = replicate(_A )
A_ : Union[str, Any] = jax.random.split(_A , _A )
A_ : List[Any] = shard(_A )
A_ : Dict = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(_A , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5E-1
A_ : Tuple = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_A ) == num_samples
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_A )
A_ : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : List[str] = jax.random.PRNGKey(0 )
A_ : Union[str, Any] = 5_0
A_ : Tuple = jax.device_count()
A_ : List[Any] = num_samples * [prompt]
A_ : str = pipeline.prepare_inputs(_A )
# shard inputs and rng
A_ : Union[str, Any] = replicate(_A )
A_ : int = jax.random.split(_A , _A )
A_ : str = shard(_A )
A_ : Optional[Any] = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5E-1
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_A )
A_ : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : Union[str, Any] = jax.random.PRNGKey(0 )
A_ : int = 5_0
A_ : Tuple = jax.device_count()
A_ : List[str] = num_samples * [prompt]
A_ : Tuple = pipeline.prepare_inputs(_A )
# shard inputs and rng
A_ : Optional[Any] = replicate(_A )
A_ : Dict = jax.random.split(_A , _A )
A_ : Tuple = shard(_A )
A_ : str = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5E-1
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A_ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : List[Any] = jax.random.PRNGKey(0 )
A_ : List[str] = 5_0
A_ : Optional[Any] = jax.device_count()
A_ : int = num_samples * [prompt]
A_ : int = pipeline.prepare_inputs(_A )
# shard inputs and rng
A_ : List[Any] = replicate(_A )
A_ : Union[str, Any] = jax.random.split(_A , _A )
A_ : int = shard(_A )
A_ : Dict = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5E-1
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_A , steps_offset=1 , )
A_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_A , safety_checker=_A , )
A_ : Any = scheduler.create_state()
A_ : Any = scheduler_state
A_ : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : Optional[Any] = jax.random.PRNGKey(0 )
A_ : Union[str, Any] = 5_0
A_ : Optional[Any] = jax.device_count()
A_ : List[str] = num_samples * [prompt]
A_ : Union[str, Any] = pipeline.prepare_inputs(_A )
# shard inputs and rng
A_ : Dict = replicate(_A )
A_ : int = jax.random.split(_A , _A )
A_ : Optional[Any] = shard(_A )
A_ : Optional[Any] = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5E-1
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : List[Any] = jax.device_count()
A_ : Dict = num_samples * [prompt]
A_ : List[str] = jax.random.split(jax.random.PRNGKey(0 ) , _A )
A_ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_A , )
A_ : Any = replicate(_A )
A_ : int = pipeline.prepare_inputs(_A )
A_ : Any = shard(_A )
A_ : List[str] = pipeline(_A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
A_ : Dict = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
A_ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_A , use_memory_efficient_attention=_A , )
A_ : int = replicate(_A )
A_ : Optional[Any] = pipeline.prepare_inputs(_A )
A_ : List[Any] = shard(_A )
A_ : Optional[Any] = pipeline(_A , _A , _A , jit=_A ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
A_ : Union[str, Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 140
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                # classifier-free guidance: push the conditional prediction away
                # from the unconditional one by the guidance scale
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 0
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase = 1_6
__lowercase = 3_2
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Forwards to AutoConfig.from_pretrained."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Forwards to AutoTokenizer.from_pretrained."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Forwards to AutoModel.from_pretrained."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Forwards to AutoModelForCausalLM.from_pretrained."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Forwards to AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Forwards to AutoModelForSequenceClassification.from_pretrained."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Forwards to AutoModelForQuestionAnswering.from_pretrained."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
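# Usage sketch (an assumption: these entry points mirror the transformers `hubconf.py`,
# historically published under the `huggingface/pytorch-transformers` torch.hub repo):
#   bert = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")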
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected output size after resizing to the configured shortest edge."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the rectified linear unit, max(0, x), elementwise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
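# np.maximum broadcasts, so the same function works on any array shape, e.g.:
# relu(np.array([[-2.0, 3.0], [0.5, -0.1]])) --> [[0.0, 3.0], [0.5, 0.0]]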
"""simple docstring"""
from ....utils import logging
_A = logging.get_logger(__name__)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=2048 ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = config.__dict__
UpperCAmelCase__ : Union[str, Any] = modal_hidden_size
if num_labels:
UpperCAmelCase__ : Tuple = num_labels
def solution(max_input: int = 1_000_000) -> int:
    """Return the starting number below `max_input` with the longest Collatz chain."""
    pre_counter = 1
    largest_number = 1
    counters = {1: 1}
    for input1 in range(2, max_input):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
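# Project Euler problem 14: for the default bound of one million, the starting
# number with the longest Collatz chain is 837799. Memoizing chain lengths in
# `counters` is what keeps this brute-force search fast.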
if __name__ == "__main__":
print(solution(int(input().strip())))
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> import torch\n        >>> import numpy as np\n\n        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n        >>> from transformers import pipeline\n        >>> from diffusers.utils import load_image\n\n\n        >>> def make_hint(image, depth_estimator):\n        ...     image = depth_estimator(image)["depth"]\n        ...     image = np.array(image)\n        ...     image = image[:, :, None]\n        ...     image = np.concatenate([image, image, image], axis=2)\n        ...     detected_map = torch.from_numpy(image).float() / 255.0\n        ...     hint = detected_map.permute(2, 0, 1)\n        ...     return hint\n\n\n        >>> depth_estimator = pipeline("depth-estimation")\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior = pipe_prior.to("cuda")\n\n        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n        ... )\n        >>> pipe = pipe.to("cuda")\n\n\n        >>> img = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/cat.png"\n        ... ).resize((768, 768))\n\n        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n        >>> prompt = "A robot, 4k photo"\n        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n        >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n        >>> image_emb, zero_image_emb = pipe_prior(\n        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n        ... ).to_tuple()\n\n        >>> images = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     hint=hint,\n        ...     num_inference_steps=50,\n        ...     generator=generator,\n        ...     height=768,\n        ...     width=768,\n        ... ).images\n\n        >>> images[0].save("robot_cat.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested size up to the nearest multiple of scale_factor**2."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
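# Example: with the default scale_factor of 8, a requested 768x768 image maps to a
# 96x96 latent grid (768 // 64 = 12, times 8), and sizes that are not a multiple of
# 64 are rounded up so the decoder always receives a valid latent shape.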
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed

MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Perform a few sanity checks on the CLI arguments before starting distillation."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")
        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        # Run the exported graph and compare it against the PyTorch beam search output.
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the whole file and return its content as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extend the lexicon with both children of `curr_string`, widening codes at powers of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress a bit string with the LZW-style scheme used by this module."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed stream with a header encoding the source file length.

    The length is written as its binary form preceded by (len - 1) zeros, so a
    decompressor can tell where the header ends and the payload begins.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write a bit string to disk, padding the final byte with a 1 followed by zeros."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
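# Example invocation (hypothetical file name):
#   python lzw_compress.py input.bin output.lzw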
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list, steps: int) -> list:
    """Run `steps` rounds of the Koch construction on the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list) -> list:
    """Replace every segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
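# Sanity check: rotate(numpy.array([1, 0]), 90) is approximately [0, 1],
# i.e. the unit x-vector after a quarter turn counter-clockwise.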
def plot(vectors: list) -> None:
    """Plot the polyline described by `vectors` with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(list_of_lists):
    return list(itertools.chain.from_iterable(list_of_lists))


def save_git_info(folder_path) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f, x):
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
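# Added sanity-check examples for the metric helpers above (not part of the
# original utilities; the values follow directly from the definitions):
#   exact_match_score("The cat sat.", "the CAT sat")   -> True (both normalize to "cat sat")
#   f1_score("The cat sat on a mat.", "the CAT sat")   -> ~0.67 (precision 0.5, recall 1.0)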
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
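# Hedged usage sketch (added): wiring the dataset into a DataLoader. The data
# directory layout ("train.source"/"train.target") follows __init__ above; the
# checkpoint name and lengths are illustrative assumptions.
#
#   from torch.utils.data import DataLoader
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=1024, max_target_length=56)
#   loader = DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)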
| 42
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
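# Design note (added): token ids are stored as np.uint16 whenever the vocabulary
# fits in 16 bits (vocab_size < 65536, true for BERT, RoBERTa and GPT-2), which
# halves the pickle size versus 32-bit ids; np.int32 is the safe fallback.
# e.g. a hypothetical 10M-token corpus is ~20 MB as uint16 versus ~40 MB as int32.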
| 303
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
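# Minimal usage sketch (added): the default config, plus the downsampling ratio
# of the convolutional feature extractor exposed by the property above.
#
#   config = UniSpeechConfig()
#   config.inputs_to_logits_ratio  # 5 * 2**6 = 320 input samples per output frame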
| 309
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
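# Hedged usage sketch (added): watermark a batch of images in [-1, 1]; images
# narrower than 256 px are returned unchanged. The shape is an illustrative assumption.
#
#   wm = StableDiffusionXLWatermarker()
#   images = torch.rand(1, 3, 512, 512) * 2 - 1
#   watermarked = wm.apply_watermark(images)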
| 303
| 0
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 63
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
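# Worked example (added): cells equal to 1 are walkable; each step costs 1.
#   grid = np.array([[1, 1], [0, 1]])
#   dijkstra(grid, (0, 0), (1, 1), allow_diagonal=False)
#   # -> (2.0, [(0, 0), (0, 1), (1, 1)])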
| 303
| 0
|
from torch import nn
def get_activation(act_fn):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}")
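# Minimal usage sketch (added): map a config string to a torch activation module.
#   act = get_activation("silu")  # -> nn.SiLU()
#   act = get_activation("gelu")  # -> nn.GELU()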
| 149
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 303
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
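# Hypothetical invocation (added; the script name and paths are placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output [--classification_head]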
| 140
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
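# Hedged usage sketch (added), mirroring the integration test above outside unittest:
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]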
| 272
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
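# Worked example (added): a triangle graph with weighted edges 0-1 (1), 1-2 (2),
# 0-2 (3); the minimum spanning tree keeps the two cheapest edges.
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)]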
| 303
| 0
|
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 240
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 303
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 171
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
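# Quick check (added): sizes are rounded up to a multiple of scale_factor**2 in
# latent space, then mapped back to latent-compatible pixel sizes.
#   downscale_height_and_width(768, 768)  # -> (96, 96): 768 // 8**2 = 12, 12 * 8 = 96
#   downscale_height_and_width(100, 100)  # -> (16, 16): 100 // 64 = 1, +1 for remainder, * 8 = 16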
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for image generation using Kandinsky 2.2 with ControlNet guidance.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            # the unconditional branch is stacked in front of the conditional one
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    """simple docstring"""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
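# Sketch of the blending performed in `forward` (illustrative, not original code): each
# transformer encodes its own slice of the conditioning tokens, the input is subtracted to
# keep only the residual, and the two residuals are mixed before re-adding the skip:
#     output = mix_ratio * (enc_0 - x) + (1 - mix_ratio) * (enc_1 - x) + x
# so mix_ratio = 1.0 would rely entirely on the first condition's transformer.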
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a remote URI; local paths pass through."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """A filesystem is remote unless it uses the local "file" protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src: str, dst: str):
    """Rename/move a path on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Reset fsspec's async machinery (loop, I/O thread, lock) so it can be re-created, e.g. after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
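# Minimal usage sketch (illustrative, not part of the original module); it only relies on
# fsspec's built-in local filesystem, so it is safe to run without s3fs installed.
if __name__ == "__main__":
    local_fs = fsspec.filesystem("file")
    print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # -> "my-bucket/datasets/train"
    print(extract_path_from_uri("/tmp/datasets/train"))  # local paths are returned unchanged
    print(is_remote_filesystem(local_fs))  # -> False when the protocol is the plain string "file"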
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
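# Example invocation (illustrative; the paths are placeholders, not part of the original script):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output_dir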
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
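# These re-exports define the public surface of the processors package, e.g. (illustrative):
#   from transformers.data.processors import SquadV2Processor, squad_convert_examples_to_features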
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
                    [-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
                    [2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
                ]
            ], device=torch_device, )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
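# Sanity sketch of the ratio-based bound used above (illustrative): with TOLERANCE = 1e-3 an
# expected value of 1e8 accepts outputs roughly within 0.1% of 1e8, i.e. a *relative* check,
# whereas an absolute `atol` small enough for the 1e-1-scale entries would always fail on the
# 1e7-scale entries of the same tensor.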
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of an edge label to the child node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str):
        """Computes the common substring of the node's prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
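    # Illustrative example (not part of the original class):
    #   RadixNode("banana").match("bandana") -> ("ban", "ana", "dana")
    # i.e. the shared prefix, what is left of the node's prefix, and what is left of the word.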
    def insert_many(self, words):
        """Inserts many words into the tree."""
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        """Inserts a word into the tree."""
        # Case 1: The word is the prefix of the node
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """Returns True if the word is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if present; returns whether anything was removed."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
        """Prints the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}.")

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process.")
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
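# Minimal usage sketch (illustrative; the checkpoint id is an assumption, not part of this file):
#   pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)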
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1E-4))
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Loads an OmegaConf config, optionally printing it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Builds a VQModel from a config and loads a checkpoint onto the given device."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Encodes an image batch to the VQGAN latent space and decodes it back."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolves a dotted path like "pkg.module.Class" to the actual object."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiates the class named by config["target"] with config["params"]."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Builds a model from a config and optionally loads a state dict."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Loads a Lightning checkpoint (if given) and builds the model from it."""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
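# Illustrative usage of the helpers above (paths are placeholders, not shipped with this script):
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml")
#   xrec = reconstruct_with_vqgan(x.to(device), vqgan)   # x: image batch normalised to [-1, 1]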
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
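# Note (illustrative, not original code): `attribute_map` above lets generic code read
# `config.hidden_size` and `config.num_attention_heads` even though this decoder-only config
# stores those values under `d_model` and `decoder_attention_heads`, e.g.
#   Speech2Text2Config(d_model=256).hidden_size == 256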
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
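# Usage sketch (assuming this file is the package __init__ of
# transformers.models.electra): attributes resolve lazily on first access, e.g.
#
#     from transformers.models.electra import ElectraModel  # triggers the real import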
| 303
| 0
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic across runs, so fix the seed
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # keyword-argument variant of the call above (name and keyword form follow
        # the upstream transformers test this file mirrors)
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
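# Run sketch (test path hypothetical): these tests download the SpeechT5
# checkpoint, so they need network access:
#
#     python -m pytest tests/tools/test_text_to_speech.py -q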
| 140
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""
    Pipeline for class-conditional image generation with a diffusion transformer.
    Class and variable names restored from the obfuscated original, which mirrors
    the diffusers DiT pipeline.
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
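# Minimal usage sketch — the checkpoint name is a real DiT checkpoint, the rest
# assumes this file is installed as the diffusers DiT pipeline restored above:
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["golden retriever"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]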
| 303
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        # method name reconstructed; the obfuscated original reused one name for every test
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
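# Run sketch (test path hypothetical): the integration tests above are gated
# behind the slow-test flag used across the transformers test suite:
#
#     RUN_SLOW=1 python -m pytest tests/models/segformer/test_modeling_segformer.py -q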
| 272
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Load a model configuration (delegates to AutoConfig.from_pretrained)."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Load a tokenizer (delegates to AutoTokenizer.from_pretrained)."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Load a bare model (delegates to AutoModel.from_pretrained)."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Load a causal language model (delegates to AutoModelForCausalLM.from_pretrained)."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Load a masked language model (delegates to AutoModelForMaskedLM.from_pretrained)."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Load a sequence classification model (delegates to AutoModelForSequenceClassification.from_pretrained)."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Load a question answering model (delegates to AutoModelForQuestionAnswering.from_pretrained)."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
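# Usage sketch via torch.hub (assuming this file sits at the repository root as
# hubconf.py, which is what the SRC_DIR shim above suggests):
#
#     import torch
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")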
| 303
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 240
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 303
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # Placeholder that raises a helpful error when the `note_seq` backend is missing;
    # class and method names restored from the standard diffusers dummy-object layout.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 171
|
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoized chain lengths, so each number is walked at most once

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
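# With the default limit of 1,000,000 this is Project Euler problem 14;
# solution(1_000_000) returns 837799.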
| 303
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        expected_converted_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, converted_md_list_sample, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, expected_converted_md_list)
| 225
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Basic consistency checks on the parsed arguments."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")
    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
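# Invocation sketch (paths and file names hypothetical; flags come from the parser above):
#
#     python train.py --student_type distilbert --student_config student_config.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 \
#         --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#         --dump_path serialization_dir/my_distillation --force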
| 303
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 154
|
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace curr_string with its two one-bit extensions in the LZ lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    # when the index crosses a power of two, every code word needs one more bit
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress a bit string with a Lempel-Ziv-style growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed stream with the original file length in binary."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes and write it out, padding the last byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
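
def _demo_compress_data() -> None:
    # Added usage sketch (not part of the original module): compress a short
    # in-memory bit string instead of a file. Codes widen by one bit whenever
    # `index` reaches a power of two (the math.log2 check in add_key_to_lexicon).
    assert compress_data("0100") == "00100"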
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of `array` summing to `target` (plain recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoization over the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, built bottom-up: dp_array[i] is the number of ways to reach sum i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
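
def _demo_combination_sum_iv() -> None:
    # Added cross-check (not part of the original module): the three strategies
    # above count the same ordered combinations, so they must agree.
    array = [1, 2, 5]
    for target in range(8):
        a = combination_sum_iv(len(array), array, target)
        b = combination_sum_iv_dp_array(len(array), array, target)
        c = combination_sum_iv_bottom_up(len(array), array, target)
        assert a == b == c, (target, a, b, c)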
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2 * n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
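
def _check_hexagonal_closed_form() -> None:
    # Added check (not part of the original module): h(n) = n * (2n - 1) equals
    # the partial sums of the arithmetic sequence 1, 5, 9, ... (step 4), which is
    # the usual geometric definition of hexagonal numbers.
    step_value, total = 1, 0
    sums = []
    for _ in range(10):
        total += step_value
        sums.append(total)
        step_value += 4
    assert hexagonal_numbers(11)[1:] == sums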
import argparse
import logging
import pickle
import random
import time

import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits only when the vocabulary is small enough
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
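
# Added usage sketch (not part of the original script); paths below are
# hypothetical, adjust them to your data layout:
#
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text
#
# The resulting pickle can then be loaded back with:
#
#   import pickle
#   with open("data/binarized_text.bert-base-uncased.pickle", "rb") as f:
#       sequences = pickle.load(f)  # list of numpy arrays of token ids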
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasing array with binary search."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
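
def _demo_count_negatives() -> None:
    # Added cross-check (not part of the original module): all three counting
    # strategies must agree on the small test grids defined above.
    for test_grid in test_grids[:-1]:  # skip the large generated grid for speed
        expected = count_negatives_brute_force(test_grid)
        assert count_negatives_binary_search(test_grid) == expected
        assert count_negatives_brute_force_with_break(test_grid) == expected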
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        # map from [-1, 1] to [0, 255] and to HWC numpy arrays for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        # back to CHW tensors in [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
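
def _demo_apply_watermark() -> None:
    # Added usage sketch (not part of the original module); requires the
    # `invisible-watermark` package that provides `imwatermark`. Shapes are
    # preserved, and inputs narrower than 256 px are returned unchanged.
    images = torch.zeros(1, 3, 256, 256)  # dummy batch in the [-1, 1] value range
    watermarked = StableDiffusionXLWatermarker().apply_watermark(images)
    assert watermarked.shape == images.shape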
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
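
# Added usage sketch (not part of the original module). With the serving extras
# installed, the subcommand registered above can be started from the CLI, e.g.:
#
#   transformers-cli serve --task text-classification --port 8888
#
# and queried over HTTP. `embed=True` on the Body() parameters means the JSON
# keys match the parameter names:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8888/tokenize",
#       json={"text_input": "Hello world", "return_ids": True},
#   )
#   print(resp.json())  # {"tokens": [...], "tokens_ids": [...]}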
from heapq import heappop, heappush

import numpy as np


def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path through a binary grid (1 = walkable), returning (distance, path)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
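
def _demo_dijkstra_grid() -> None:
    # Added usage sketch (not part of the original module): find the shortest
    # path through a small binary grid where 1 marks walkable cells.
    small_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    dist, path = dijkstra(small_grid, (0, 0), (2, 2), allow_diagonal=False)
    assert dist == 4 and path[0] == (0, 0) and path[-1] == (2, 2)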
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Scan the list recursively from both ends; return the index of `key`, or -1 if absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
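
def _demo_search() -> None:
    # Added usage sketch (not part of the original module).
    data = [1, 2, 4, 8, 16]
    assert search(data, 8) == 3
    assert search(data, 1) == 0
    assert search(data, 3) == -1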
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
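
def _demo_split_dict_roundtrip() -> None:
    # Added usage sketch (not part of the original tests): the YAML list form is
    # what gets written into a dataset's README metadata, and it round-trips.
    splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    yaml_list = splits._to_yaml_list()
    assert SplitDict._from_yaml_list(yaml_list)["train"].num_examples == 42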
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
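
def _demo_align_config() -> None:
    # Added usage sketch (not part of the original module): compose a full
    # AlignConfig from explicitly built text and vision sub-configs.
    text_config = AlignTextConfig(vocab_size=1000, hidden_size=64)
    vision_config = AlignVisionConfig(image_size=224)
    config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=128)
    assert config.to_dict()["projection_dim"] == 128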
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift a node down until the min-heap property is restored."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a node up after its value decreased."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
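
def _demo_prisms_algorithm() -> None:
    # Added usage sketch (not part of the original module): a 4-vertex graph
    # whose minimum spanning tree keeps the three cheap edges and drops (0, 3).
    adjacency_list = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10)]:
        adjacency_list[u].append([v, w])
        adjacency_list[v].append([u, w])
    edges = prisms_algorithm(adjacency_list)
    assert len(edges) == 3  # a spanning tree of n vertices has n - 1 edges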
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `steps` Koch iterations to the polyline given by `initial_vectors`."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
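
def _demo_koch_growth() -> None:
    # Added check (not part of the original module): each iteration replaces a
    # segment with four, so a polyline with s segments gains 3 * s points.
    # The initial triangle has 4 points (3 segments) -> 13 points after one step.
    assert len(iterate(INITIAL_VECTORS, 1)) == 13
    assert len(iterate(INITIAL_VECTORS, 2)) == 49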
"""simple docstring"""
import operator
def a__ ( lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = None ) -> str:
UpperCAmelCase__ : int = operator.lt if reverse else operator.gt
UpperCAmelCase__ : Optional[int] = solution or []
if not arr:
return solution
UpperCAmelCase__ : Union[str, Any] = [arr.pop(0 )]
for i, item in enumerate(lowerCAmelCase ):
if _operator(lowerCAmelCase , sublist[-1] ):
sublist.append(lowerCAmelCase )
arr.pop(lowerCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(lowerCAmelCase )
else:
while sublist:
UpperCAmelCase__ : List[str] = sublist.pop(0 )
for i, xx in enumerate(lowerCAmelCase ):
if not _operator(lowerCAmelCase , lowerCAmelCase ):
solution.insert(lowerCAmelCase , lowerCAmelCase )
break
else:
solution.append(lowerCAmelCase )
strand_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width up to the next multiple of scale_factor**2, expressed in latent units."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
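
def _demo_nat_config() -> None:
    # Added usage sketch (not part of the original module): the derived
    # attributes follow from `depths` and `embed_dim` as computed in __init__.
    config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
    assert config.num_layers == 4
    assert config.hidden_size == 64 * 2**3  # channel dim after the last stage
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]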
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a remote dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
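
def _demo_path_helpers() -> None:
    # Added usage sketch (not part of the original module).
    assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
    assert extract_path_from_uri("relative/dataset") == "relative/dataset"
    assert not is_remote_filesystem(None)  # no filesystem object -> treated as local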
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
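
# Added usage sketch (not part of the original module). The pipeline factory
# dispatches to this class for the "mask-generation" task; the model name below
# is the usual SAM checkpoint and is an assumption, not taken from this file:
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
#   outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   print(len(outputs["masks"]), outputs["scores"][:3])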